|
@@ -1,8 +1,7 @@
|
|
|
import sys
|
|
|
import warnings
|
|
|
|
|
|
-from celery import conf
|
|
|
-from celery.backends import default_backend
|
|
|
+from celery.defaults import default_app
|
|
|
from celery.exceptions import MaxRetriesExceededError, RetryTaskError
|
|
|
from celery.execute import apply_async, apply
|
|
|
from celery.log import setup_task_logger
|
|
@@ -71,493 +70,510 @@ class TaskType(type):
|
|
|
return tasks[task_name].__class__
|
|
|
|
|
|
|
|
|
-class Task(object):
|
|
|
- """A celery task.
|
|
|
+def create_task_cls(app):
|
|
|
|
|
|
- All subclasses of :class:`Task` must define the :meth:`run` method,
|
|
|
- which is the actual method the ``celery`` daemon executes.
|
|
|
|
|
|
- The :meth:`run` method can take use of the default keyword arguments,
|
|
|
- as listed in the :meth:`run` documentation.
|
|
|
+ class Task(object):
|
|
|
+ """A celery task.
|
|
|
|
|
|
- The resulting class is callable, which if called will apply the
|
|
|
- :meth:`run` method.
|
|
|
+ All subclasses of :class:`Task` must define the :meth:`run` method,
|
|
|
+ which is the actual method the ``celery`` daemon executes.
|
|
|
|
|
|
- .. attribute:: name
|
|
|
+ The :meth:`run` method can make use of the default keyword arguments,
|
|
|
+ as listed in the :meth:`run` documentation.
|
|
|
|
|
|
- Name of the task.
|
|
|
+ The resulting class is callable, which if called will apply the
|
|
|
+ :meth:`run` method.
|
|
|
|
|
|
- .. attribute:: abstract
|
|
|
+ .. attribute:: name
|
|
|
|
|
|
- If ``True`` the task is an abstract base class.
|
|
|
+ Name of the task.
|
|
|
|
|
|
- .. attribute:: type
|
|
|
+ .. attribute:: abstract
|
|
|
|
|
|
- The type of task, currently this can be ``regular``, or ``periodic``,
|
|
|
- however if you want a periodic task, you should subclass
|
|
|
- :class:`PeriodicTask` instead.
|
|
|
+ If ``True`` the task is an abstract base class.
|
|
|
|
|
|
- .. attribute:: queue
|
|
|
+ .. attribute:: type
|
|
|
|
|
|
- Select a destination queue for this task. The queue needs to exist
|
|
|
- in ``CELERY_QUEUES``. The ``routing_key``, ``exchange`` and
|
|
|
- ``exchange_type`` attributes will be ignored if this is set.
|
|
|
+ The type of task, currently unused.
|
|
|
|
|
|
- .. attribute:: routing_key
|
|
|
+ .. attribute:: queue
|
|
|
|
|
|
- Override the global default ``routing_key`` for this task.
|
|
|
+ Select a destination queue for this task. The queue needs to exist
|
|
|
+ in ``CELERY_QUEUES``. The ``routing_key``, ``exchange`` and
|
|
|
+ ``exchange_type`` attributes will be ignored if this is set.
|
|
|
|
|
|
- .. attribute:: exchange
|
|
|
+ .. attribute:: routing_key
|
|
|
|
|
|
- Override the global default ``exchange`` for this task.
|
|
|
+ Override the global default ``routing_key`` for this task.
|
|
|
|
|
|
- .. attribute:: exchange_type
|
|
|
+ .. attribute:: exchange
|
|
|
|
|
|
- Override the global default exchange type for this task.
|
|
|
+ Override the global default ``exchange`` for this task.
|
|
|
|
|
|
- .. attribute:: delivery_mode
|
|
|
+ .. attribute:: exchange_type
|
|
|
|
|
|
- Override the global default delivery mode for this task.
|
|
|
- By default this is set to ``2`` (persistent). You can change this
|
|
|
- to ``1`` to get non-persistent behavior, which means the messages
|
|
|
- are lost if the broker is restarted.
|
|
|
+ Override the global default exchange type for this task.
|
|
|
|
|
|
- .. attribute:: mandatory
|
|
|
+ .. attribute:: delivery_mode
|
|
|
|
|
|
- Mandatory message routing. An exception will be raised if the task
|
|
|
- can't be routed to a queue.
|
|
|
+ Override the global default delivery mode for this task.
|
|
|
+ By default this is set to ``2`` (persistent). You can change this
|
|
|
+ to ``1`` to get non-persistent behavior, which means the messages
|
|
|
+ are lost if the broker is restarted.
|
|
|
|
|
|
- .. attribute:: immediate:
|
|
|
+ .. attribute:: mandatory
|
|
|
|
|
|
- Request immediate delivery. An exception will be raised if the task
|
|
|
- can't be routed to a worker immediately.
|
|
|
+ Mandatory message routing. An exception will be raised if the task
|
|
|
+ can't be routed to a queue.
|
|
|
|
|
|
- .. attribute:: priority:
|
|
|
+ .. attribute:: immediate:
|
|
|
|
|
|
- The message priority. A number from ``0`` to ``9``, where ``0`` is the
|
|
|
- highest. Note that RabbitMQ doesn't support priorities yet.
|
|
|
+ Request immediate delivery. An exception will be raised if the task
|
|
|
+ can't be routed to a worker immediately.
|
|
|
|
|
|
- .. attribute:: max_retries
|
|
|
+ .. attribute:: priority:
|
|
|
|
|
|
- Maximum number of retries before giving up.
|
|
|
- If set to ``None``, it will never stop retrying.
|
|
|
+ The message priority. A number from ``0`` to ``9``, where ``0``
|
|
|
+ is the highest. Note that RabbitMQ doesn't support priorities yet.
|
|
|
|
|
|
- .. attribute:: default_retry_delay
|
|
|
+ .. attribute:: max_retries
|
|
|
|
|
|
- Default time in seconds before a retry of the task should be
|
|
|
- executed. Default is a 3 minute delay.
|
|
|
+ Maximum number of retries before giving up.
|
|
|
+ If set to ``None``, it will never stop retrying.
|
|
|
|
|
|
- .. attribute:: rate_limit
|
|
|
+ .. attribute:: default_retry_delay
|
|
|
|
|
|
- Set the rate limit for this task type, Examples: ``None`` (no rate
|
|
|
- limit), ``"100/s"`` (hundred tasks a second), ``"100/m"`` (hundred
|
|
|
- tasks a minute), ``"100/h"`` (hundred tasks an hour)
|
|
|
+ Default time in seconds before a retry of the task should be
|
|
|
+ executed. Default is a 3 minute delay.
|
|
|
|
|
|
- .. attribute:: ignore_result
|
|
|
+ .. attribute:: rate_limit
|
|
|
|
|
|
- Don't store the return value of this task.
|
|
|
+ Set the rate limit for this task type, Examples: ``None`` (no rate
|
|
|
+ limit), ``"100/s"`` (hundred tasks a second), ``"100/m"`` (hundred
|
|
|
+ tasks a minute), ``"100/h"`` (hundred tasks an hour)
|
|
|
|
|
|
- .. attribute:: store_errors_even_if_ignored
|
|
|
+ .. attribute:: ignore_result
|
|
|
|
|
|
- If true, errors will be stored even if the task is configured
|
|
|
- to ignore results.
|
|
|
+ Don't store the return value of this task.
|
|
|
|
|
|
- .. attribute:: send_error_emails
|
|
|
+ .. attribute:: store_errors_even_if_ignored
|
|
|
|
|
|
- If true, an e-mail will be sent to the admins whenever
|
|
|
- a task of this type raises an exception.
|
|
|
+ If true, errors will be stored even if the task is configured
|
|
|
+ to ignore results.
|
|
|
|
|
|
- .. attribute:: error_whitelist
|
|
|
+ .. attribute:: send_error_emails
|
|
|
|
|
|
- List of exception types to send error e-mails for.
|
|
|
+ If true, an e-mail will be sent to the admins whenever
|
|
|
+ a task of this type raises an exception.
|
|
|
|
|
|
- .. attribute:: serializer
|
|
|
+ .. attribute:: error_whitelist
|
|
|
|
|
|
- The name of a serializer that has been registered with
|
|
|
- :mod:`carrot.serialization.registry`. Example: ``"json"``.
|
|
|
+ List of exception types to send error e-mails for.
|
|
|
|
|
|
- .. attribute:: backend
|
|
|
+ .. attribute:: serializer
|
|
|
|
|
|
- The result store backend used for this task.
|
|
|
+ The name of a serializer that has been registered with
|
|
|
+ :mod:`carrot.serialization.registry`. Example: ``"json"``.
|
|
|
|
|
|
- .. attribute:: autoregister
|
|
|
+ .. attribute:: backend
|
|
|
|
|
|
- If ``True`` the task is automatically registered in the task
|
|
|
- registry, which is the default behaviour.
|
|
|
+ The result store backend used for this task.
|
|
|
|
|
|
- .. attribute:: track_started
|
|
|
+ .. attribute:: autoregister
|
|
|
|
|
|
- If ``True`` the task will report its status as "started"
|
|
|
- when the task is executed by a worker.
|
|
|
- The default value is ``False`` as the normal behaviour is to not
|
|
|
- report that level of granularity. Tasks are either pending, finished,
|
|
|
- or waiting to be retried. Having a "started" status can be useful for
|
|
|
- when there are long running tasks and there is a need to report which
|
|
|
- task is currently running.
|
|
|
+ If ``True`` the task is automatically registered in the task
|
|
|
+ registry, which is the default behaviour.
|
|
|
|
|
|
- The global default can be overridden by the ``CELERY_TRACK_STARTED``
|
|
|
- setting.
|
|
|
+ .. attribute:: track_started
|
|
|
|
|
|
- .. attribute:: acks_late
|
|
|
+ If ``True`` the task will report its status as "started"
|
|
|
+ when the task is executed by a worker.
|
|
|
+ The default value is ``False`` as the normal behaviour is to not
|
|
|
+ report that level of granularity. Tasks are either pending,
|
|
|
+ finished, or waiting to be retried.
|
|
|
|
|
|
- If set to ``True`` messages for this task will be acknowledged
|
|
|
- **after** the task has been executed, not *just before*, which is
|
|
|
- the default behavior.
|
|
|
+ Having a "started" status can be useful for when there are long
|
|
|
+ running tasks and there is a need to report which task is
|
|
|
+ currently running.
|
|
|
|
|
|
- Note that this means the task may be executed twice if the worker
|
|
|
- crashes in the middle of execution, which may be acceptable for some
|
|
|
- applications.
|
|
|
+ The global default can be overridden with the
|
|
|
+ ``CELERY_TRACK_STARTED`` setting.
|
|
|
|
|
|
- The global default can be overriden by the ``CELERY_ACKS_LATE``
|
|
|
- setting.
|
|
|
+ .. attribute:: acks_late
|
|
|
|
|
|
- """
|
|
|
- __metaclass__ = TaskType
|
|
|
-
|
|
|
- name = None
|
|
|
- abstract = True
|
|
|
- autoregister = True
|
|
|
- type = "regular"
|
|
|
-
|
|
|
- queue = None
|
|
|
- routing_key = None
|
|
|
- exchange = None
|
|
|
- exchange_type = conf.DEFAULT_EXCHANGE_TYPE
|
|
|
- delivery_mode = conf.DEFAULT_DELIVERY_MODE
|
|
|
- immediate = False
|
|
|
- mandatory = False
|
|
|
- priority = None
|
|
|
-
|
|
|
- ignore_result = conf.IGNORE_RESULT
|
|
|
- store_errors_even_if_ignored = conf.STORE_ERRORS_EVEN_IF_IGNORED
|
|
|
- send_error_emails = conf.CELERY_SEND_TASK_ERROR_EMAILS
|
|
|
- error_whitelist = conf.CELERY_TASK_ERROR_WHITELIST
|
|
|
- disable_error_emails = False # FIXME
|
|
|
- max_retries = 3
|
|
|
- default_retry_delay = 3 * 60
|
|
|
- serializer = conf.TASK_SERIALIZER
|
|
|
- rate_limit = conf.DEFAULT_RATE_LIMIT
|
|
|
- backend = default_backend
|
|
|
- track_started = conf.TRACK_STARTED
|
|
|
- acks_late = conf.ACKS_LATE
|
|
|
-
|
|
|
- MaxRetriesExceededError = MaxRetriesExceededError
|
|
|
-
|
|
|
- def __call__(self, *args, **kwargs):
|
|
|
- return self.run(*args, **kwargs)
|
|
|
-
|
|
|
- def __reduce__(self):
|
|
|
- return (_unpickle_task, (self.name, ), None)
|
|
|
-
|
|
|
- def run(self, *args, **kwargs):
|
|
|
- """The body of the task executed by the worker.
|
|
|
-
|
|
|
- The following standard keyword arguments are reserved and is passed
|
|
|
- by the worker if the function/method supports them:
|
|
|
-
|
|
|
- * task_id
|
|
|
- * task_name
|
|
|
- * task_retries
|
|
|
- * task_is_eager
|
|
|
- * logfile
|
|
|
- * loglevel
|
|
|
- * delivery_info
|
|
|
-
|
|
|
- Additional standard keyword arguments may be added in the future.
|
|
|
- To take these default arguments, the task can either list the ones
|
|
|
- it wants explicitly or just take an arbitrary list of keyword
|
|
|
- arguments (\*\*kwargs).
|
|
|
-
|
|
|
- """
|
|
|
- raise NotImplementedError("Tasks must define the run method.")
|
|
|
-
|
|
|
- @classmethod
|
|
|
- def get_logger(self, loglevel=None, logfile=None, **kwargs):
|
|
|
- """Get task-aware logger object.
|
|
|
-
|
|
|
- See :func:`celery.log.setup_task_logger`.
|
|
|
-
|
|
|
- """
|
|
|
- return setup_task_logger(loglevel=loglevel, logfile=logfile,
|
|
|
- task_kwargs=kwargs)
|
|
|
-
|
|
|
- @classmethod
|
|
|
- def establish_connection(self,
|
|
|
- connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
|
|
|
- """Establish a connection to the message broker."""
|
|
|
- return _establish_connection(connect_timeout=connect_timeout)
|
|
|
-
|
|
|
- @classmethod
|
|
|
- def get_publisher(self, connection=None, exchange=None,
|
|
|
- connect_timeout=conf.BROKER_CONNECTION_TIMEOUT,
|
|
|
- exchange_type=None):
|
|
|
- """Get a celery task message publisher.
|
|
|
-
|
|
|
- :rtype :class:`celery.messaging.TaskPublisher`:
|
|
|
-
|
|
|
- Please be sure to close the AMQP connection when you're done
|
|
|
- with this object, i.e.:
|
|
|
-
|
|
|
- >>> publisher = self.get_publisher()
|
|
|
- >>> # do something with publisher
|
|
|
- >>> publisher.connection.close()
|
|
|
-
|
|
|
- """
|
|
|
- if exchange is None:
|
|
|
- exchange = self.exchange
|
|
|
- if exchange_type is None:
|
|
|
- exchange_type = self.exchange_type
|
|
|
- connection = connection or self.establish_connection(connect_timeout)
|
|
|
- return TaskPublisher(connection=connection,
|
|
|
- exchange=exchange,
|
|
|
- exchange_type=exchange_type,
|
|
|
- routing_key=self.routing_key)
|
|
|
-
|
|
|
- @classmethod
|
|
|
- def get_consumer(self, connection=None,
|
|
|
- connect_timeout=conf.BROKER_CONNECTION_TIMEOUT):
|
|
|
- """Get a celery task message consumer.
|
|
|
-
|
|
|
- :rtype :class:`celery.messaging.TaskConsumer`:
|
|
|
-
|
|
|
- Please be sure to close the AMQP connection when you're done
|
|
|
- with this object. i.e.:
|
|
|
-
|
|
|
- >>> consumer = self.get_consumer()
|
|
|
- >>> # do something with consumer
|
|
|
- >>> consumer.connection.close()
|
|
|
-
|
|
|
- """
|
|
|
- connection = connection or self.establish_connection(connect_timeout)
|
|
|
- return TaskConsumer(connection=connection, exchange=self.exchange,
|
|
|
- routing_key=self.routing_key)
|
|
|
+ If set to ``True`` messages for this task will be acknowledged
|
|
|
+ **after** the task has been executed, not *just before*, which is
|
|
|
+ the default behavior.
|
|
|
|
|
|
- @classmethod
|
|
|
- def delay(self, *args, **kwargs):
|
|
|
- """Shortcut to :meth:`apply_async`, with star arguments,
|
|
|
- but doesn't support the extra options.
|
|
|
+ Note that this means the task may be executed twice if the worker
|
|
|
+ crashes in the middle of execution, which may be acceptable for some
|
|
|
+ applications.
|
|
|
|
|
|
- :param \*args: positional arguments passed on to the task.
|
|
|
- :param \*\*kwargs: keyword arguments passed on to the task.
|
|
|
-
|
|
|
- :returns :class:`celery.result.AsyncResult`:
|
|
|
-
|
|
|
- """
|
|
|
- return self.apply_async(args, kwargs)
|
|
|
-
|
|
|
- @classmethod
|
|
|
- def apply_async(self, args=None, kwargs=None, **options):
|
|
|
- """Delay this task for execution by the ``celery`` daemon(s).
|
|
|
-
|
|
|
- :param args: positional arguments passed on to the task.
|
|
|
- :param kwargs: keyword arguments passed on to the task.
|
|
|
- :keyword \*\*options: Any keyword arguments to pass on to
|
|
|
- :func:`celery.execute.apply_async`.
|
|
|
-
|
|
|
- See :func:`celery.execute.apply_async` for more information.
|
|
|
-
|
|
|
- :returns :class:`celery.result.AsyncResult`:
|
|
|
-
|
|
|
- """
|
|
|
- return apply_async(self, args, kwargs, **options)
|
|
|
-
|
|
|
- @classmethod
|
|
|
- def retry(self, args=None, kwargs=None, exc=None, throw=True, **options):
|
|
|
- """Retry the task.
|
|
|
-
|
|
|
- :param args: Positional arguments to retry with.
|
|
|
- :param kwargs: Keyword arguments to retry with.
|
|
|
- :keyword exc: Optional exception to raise instead of
|
|
|
- :exc:`~celery.exceptions.MaxRetriesExceededError` when the max
|
|
|
- restart limit has been exceeded.
|
|
|
- :keyword countdown: Time in seconds to delay the retry for.
|
|
|
- :keyword eta: Explicit time and date to run the retry at (must be a
|
|
|
- :class:`datetime.datetime` instance).
|
|
|
- :keyword \*\*options: Any extra options to pass on to
|
|
|
- meth:`apply_async`. See :func:`celery.execute.apply_async`.
|
|
|
- :keyword throw: If this is ``False``, do not raise the
|
|
|
- :exc:`~celery.exceptions.RetryTaskError` exception,
|
|
|
- that tells the worker to mark the task as being retried.
|
|
|
- Note that this means the task will be marked as failed
|
|
|
- if the task raises an exception, or successful if it
|
|
|
- returns.
|
|
|
-
|
|
|
- :raises celery.exceptions.RetryTaskError: To tell the worker that the
|
|
|
- task has been re-sent for retry. This always happens, unless
|
|
|
- the ``throw`` keyword argument has been explicitly set
|
|
|
- to ``False``, and is considered normal operation.
|
|
|
-
|
|
|
- Example
|
|
|
-
|
|
|
- >>> class TwitterPostStatusTask(Task):
|
|
|
- ...
|
|
|
- ... def run(self, username, password, message, **kwargs):
|
|
|
- ... twitter = Twitter(username, password)
|
|
|
- ... try:
|
|
|
- ... twitter.post_status(message)
|
|
|
- ... except twitter.FailWhale, exc:
|
|
|
- ... # Retry in 5 minutes.
|
|
|
- ... self.retry([username, password, message], kwargs,
|
|
|
- ... countdown=60 * 5, exc=exc)
|
|
|
+ The global default can be overridden by the ``CELERY_ACKS_LATE``
|
|
|
+ setting.
|
|
|
|
|
|
"""
|
|
|
- if not kwargs:
|
|
|
- raise TypeError(
|
|
|
- "kwargs argument to retries can't be empty. "
|
|
|
- "Task must accept **kwargs, see http://bit.ly/cAx3Bg")
|
|
|
-
|
|
|
- delivery_info = kwargs.pop("delivery_info", {})
|
|
|
- options.setdefault("exchange", delivery_info.get("exchange"))
|
|
|
- options.setdefault("routing_key", delivery_info.get("routing_key"))
|
|
|
-
|
|
|
- options["retries"] = kwargs.pop("task_retries", 0) + 1
|
|
|
- options["task_id"] = kwargs.pop("task_id", None)
|
|
|
- options["countdown"] = options.get("countdown",
|
|
|
- self.default_retry_delay)
|
|
|
- max_exc = exc or self.MaxRetriesExceededError(
|
|
|
- "Can't retry %s[%s] args:%s kwargs:%s" % (
|
|
|
- self.name, options["task_id"], args, kwargs))
|
|
|
- max_retries = self.max_retries
|
|
|
- if max_retries is not None and options["retries"] > max_retries:
|
|
|
- raise max_exc
|
|
|
-
|
|
|
- # If task was executed eagerly using apply(),
|
|
|
- # then the retry must also be executed eagerly.
|
|
|
- if kwargs.get("task_is_eager", False):
|
|
|
- result = self.apply(args=args, kwargs=kwargs, **options)
|
|
|
- if isinstance(result, EagerResult):
|
|
|
- return result.get() # propogates exceptions.
|
|
|
- return result
|
|
|
-
|
|
|
- self.apply_async(args=args, kwargs=kwargs, **options)
|
|
|
-
|
|
|
- if throw:
|
|
|
- message = "Retry in %d seconds." % options["countdown"]
|
|
|
- raise RetryTaskError(message, exc)
|
|
|
-
|
|
|
- @classmethod
|
|
|
- def apply(self, args=None, kwargs=None, **options):
|
|
|
- """Execute this task locally, by blocking until the task
|
|
|
- has finished executing.
|
|
|
-
|
|
|
- :param args: positional arguments passed on to the task.
|
|
|
- :param kwargs: keyword arguments passed on to the task.
|
|
|
- :keyword throw: Re-raise task exceptions. Defaults to
|
|
|
- the ``CELERY_EAGER_PROPAGATES_EXCEPTIONS`` setting.
|
|
|
-
|
|
|
- :rtype :class:`celery.result.EagerResult`:
|
|
|
-
|
|
|
- See :func:`celery.execute.apply`.
|
|
|
+ __metaclass__ = TaskType
|
|
|
+
|
|
|
+ name = None
|
|
|
+ abstract = True
|
|
|
+ autoregister = True
|
|
|
+ type = "regular"
|
|
|
+
|
|
|
+ queue = None
|
|
|
+ routing_key = None
|
|
|
+ exchange = None
|
|
|
+ exchange_type = app.conf.CELERY_DEFAULT_EXCHANGE_TYPE
|
|
|
+ delivery_mode = app.conf.CELERY_DEFAULT_DELIVERY_MODE
|
|
|
+ immediate = False
|
|
|
+ mandatory = False
|
|
|
+ priority = None
|
|
|
+
|
|
|
+ ignore_result = app.conf.CELERY_IGNORE_RESULT
|
|
|
+ store_errors_even_if_ignored = \
|
|
|
+ app.conf.CELERY_STORE_ERRORS_EVEN_IF_IGNORED
|
|
|
+ send_error_emails = app.conf.CELERY_SEND_TASK_ERROR_EMAILS
|
|
|
+ error_whitelist = app.conf.CELERY_TASK_ERROR_WHITELIST
|
|
|
+ disable_error_emails = False # FIXME
|
|
|
+ max_retries = 3
|
|
|
+ default_retry_delay = 3 * 60
|
|
|
+ serializer = app.conf.CELERY_TASK_SERIALIZER
|
|
|
+ rate_limit = app.conf.CELERY_DEFAULT_RATE_LIMIT
|
|
|
+ backend = app.backend
|
|
|
+ track_started = app.conf.CELERY_TRACK_STARTED
|
|
|
+ acks_late = app.conf.CELERY_ACKS_LATE
|
|
|
+
|
|
|
+ MaxRetriesExceededError = MaxRetriesExceededError
|
|
|
+
|
|
|
+ def __call__(self, *args, **kwargs):
|
|
|
+ return self.run(*args, **kwargs)
|
|
|
+
|
|
|
+ def __reduce__(self):
|
|
|
+ return (_unpickle_task, (self.name, ), None)
|
|
|
+
|
|
|
+ def run(self, *args, **kwargs):
|
|
|
+ """The body of the task executed by the worker.
|
|
|
+
|
|
|
+ The following standard keyword arguments are reserved and are
|
|
|
+ automatically passed by the worker if the function/method
|
|
|
+ supports them:
|
|
|
+
|
|
|
+ * task_id
|
|
|
+ * task_name
|
|
|
+ * task_retries
|
|
|
+ * task_is_eager
|
|
|
+ * logfile
|
|
|
+ * loglevel
|
|
|
+ * delivery_info
|
|
|
+
|
|
|
+ Additional standard keyword arguments may be added in the future.
|
|
|
+ To take these default arguments, the task can either list the ones
|
|
|
+ it wants explicitly or just take an arbitrary list of keyword
|
|
|
+ arguments (\*\*kwargs).
|
|
|
+
|
|
|
+ """
|
|
|
+ raise NotImplementedError("Tasks must define the run method.")
|
|
|
+
|
|
|
+ @classmethod
|
|
|
+ def get_logger(self, loglevel=None, logfile=None, **kwargs):
|
|
|
+ """Get task-aware logger object.
|
|
|
+
|
|
|
+ See :func:`celery.log.setup_task_logger`.
|
|
|
+
|
|
|
+ """
|
|
|
+ return setup_task_logger(loglevel=loglevel, logfile=logfile,
|
|
|
+ task_kwargs=kwargs)
|
|
|
+
|
|
|
+ @classmethod
|
|
|
+ def establish_connection(self,
|
|
|
+ connect_timeout=app.conf.BROKER_CONNECTION_TIMEOUT):
|
|
|
+ """Establish a connection to the message broker."""
|
|
|
+ return _establish_connection(connect_timeout=connect_timeout)
|
|
|
+
|
|
|
+ @classmethod
|
|
|
+ def get_publisher(self, connection=None, exchange=None,
|
|
|
+ connect_timeout=app.conf.BROKER_CONNECTION_TIMEOUT,
|
|
|
+ exchange_type=None):
|
|
|
+ """Get a celery task message publisher.
|
|
|
+
|
|
|
+ :rtype :class:`celery.messaging.TaskPublisher`:
|
|
|
+
|
|
|
+ Please be sure to close the AMQP connection when you're done
|
|
|
+ with this object, i.e.:
|
|
|
+
|
|
|
+ >>> publisher = self.get_publisher()
|
|
|
+ >>> # do something with publisher
|
|
|
+ >>> publisher.connection.close()
|
|
|
+
|
|
|
+ """
|
|
|
+ if exchange is None:
|
|
|
+ exchange = self.exchange
|
|
|
+ if exchange_type is None:
|
|
|
+ exchange_type = self.exchange_type
|
|
|
+ connection = connection or \
|
|
|
+ self.establish_connection(connect_timeout)
|
|
|
+ return TaskPublisher(connection=connection,
|
|
|
+ exchange=exchange,
|
|
|
+ exchange_type=exchange_type,
|
|
|
+ routing_key=self.routing_key)
|
|
|
+
|
|
|
+ @classmethod
|
|
|
+ def get_consumer(self, connection=None,
|
|
|
+ connect_timeout=app.conf.BROKER_CONNECTION_TIMEOUT):
|
|
|
+ """Get a celery task message consumer.
|
|
|
+
|
|
|
+ :rtype :class:`celery.messaging.TaskConsumer`:
|
|
|
+
|
|
|
+ Please be sure to close the AMQP connection when you're done
|
|
|
+ with this object. i.e.:
|
|
|
+
|
|
|
+ >>> consumer = self.get_consumer()
|
|
|
+ >>> # do something with consumer
|
|
|
+ >>> consumer.connection.close()
|
|
|
+
|
|
|
+ """
|
|
|
+ connection = connection or \
|
|
|
+ self.establish_connection(connect_timeout)
|
|
|
+ return TaskConsumer(connection=connection,
|
|
|
+ exchange=self.exchange,
|
|
|
+ routing_key=self.routing_key)
|
|
|
+
|
|
|
+ @classmethod
|
|
|
+ def delay(self, *args, **kwargs):
|
|
|
+ """Shortcut to :meth:`apply_async`, with star arguments,
|
|
|
+ but doesn't support the extra options.
|
|
|
+
|
|
|
+ :param \*args: positional arguments passed on to the task.
|
|
|
+ :param \*\*kwargs: keyword arguments passed on to the task.
|
|
|
+
|
|
|
+ :returns :class:`celery.result.AsyncResult`:
|
|
|
+
|
|
|
+ """
|
|
|
+ return self.apply_async(args, kwargs)
|
|
|
+
|
|
|
+ @classmethod
|
|
|
+ def apply_async(self, args=None, kwargs=None, **options):
|
|
|
+ """Delay this task for execution by the ``celery`` daemon(s).
|
|
|
+
|
|
|
+ :param args: positional arguments passed on to the task.
|
|
|
+ :param kwargs: keyword arguments passed on to the task.
|
|
|
+ :keyword \*\*options: Any keyword arguments to pass on to
|
|
|
+ :func:`celery.execute.apply_async`.
|
|
|
+
|
|
|
+ See :func:`celery.execute.apply_async` for more information.
|
|
|
+
|
|
|
+ :returns :class:`celery.result.AsyncResult`:
|
|
|
+
|
|
|
+ """
|
|
|
+ return apply_async(self, args, kwargs, **options)
|
|
|
+
|
|
|
+ @classmethod
|
|
|
+ def retry(self, args=None, kwargs=None, exc=None, throw=True,
|
|
|
+ **options):
|
|
|
+ """Retry the task.
|
|
|
+
|
|
|
+ :param args: Positional arguments to retry with.
|
|
|
+ :param kwargs: Keyword arguments to retry with.
|
|
|
+ :keyword exc: Optional exception to raise instead of
|
|
|
+ :exc:`~celery.exceptions.MaxRetriesExceededError` when the max
|
|
|
+ restart limit has been exceeded.
|
|
|
+ :keyword countdown: Time in seconds to delay the retry for.
|
|
|
+ :keyword eta: Explicit time and date to run the retry at
|
|
|
+ (must be a :class:`~datetime.datetime` instance).
|
|
|
+ :keyword \*\*options: Any extra options to pass on to
|
|
|
+ :meth:`apply_async`. See :func:`celery.execute.apply_async`.
|
|
|
+ :keyword throw: If this is ``False``, do not raise the
|
|
|
+ :exc:`~celery.exceptions.RetryTaskError` exception,
|
|
|
+ that tells the worker to mark the task as being retried.
|
|
|
+ Note that this means the task will be marked as failed
|
|
|
+ if the task raises an exception, or successful if it
|
|
|
+ returns.
|
|
|
+
|
|
|
+ :raises celery.exceptions.RetryTaskError: To tell the worker that
|
|
|
+ the task has been re-sent for retry. This always happens,
|
|
|
+ unless the ``throw`` keyword argument has been explicitly set
|
|
|
+ to ``False``, and is considered normal operation.
|
|
|
+
|
|
|
+ Example
|
|
|
+
|
|
|
+ >>> class TwitterPostStatusTask(Task):
|
|
|
+ ...
|
|
|
+ ... def run(self, username, password, message, **kwargs):
|
|
|
+ ... twitter = Twitter(username, password)
|
|
|
+ ... try:
|
|
|
+ ... twitter.post_status(message)
|
|
|
+ ... except twitter.FailWhale, exc:
|
|
|
+ ... # Retry in 5 minutes.
|
|
|
+ ... self.retry([username, password, message],
|
|
|
+ ... kwargs,
|
|
|
+ ... countdown=60 * 5, exc=exc)
|
|
|
+
|
|
|
+ """
|
|
|
+ if not kwargs:
|
|
|
+ raise TypeError(
|
|
|
+ "kwargs argument to retries can't be empty. "
|
|
|
+ "Task must accept **kwargs, see http://bit.ly/cAx3Bg")
|
|
|
+
|
|
|
+ delivery_info = kwargs.pop("delivery_info", {})
|
|
|
+ options.setdefault("exchange", delivery_info.get("exchange"))
|
|
|
+ options.setdefault("routing_key", delivery_info.get("routing_key"))
|
|
|
+
|
|
|
+ options["retries"] = kwargs.pop("task_retries", 0) + 1
|
|
|
+ options["task_id"] = kwargs.pop("task_id", None)
|
|
|
+ options["countdown"] = options.get("countdown",
|
|
|
+ self.default_retry_delay)
|
|
|
+ max_exc = exc or self.MaxRetriesExceededError(
|
|
|
+ "Can't retry %s[%s] args:%s kwargs:%s" % (
|
|
|
+ self.name, options["task_id"], args, kwargs))
|
|
|
+ max_retries = self.max_retries
|
|
|
+ if max_retries is not None and options["retries"] > max_retries:
|
|
|
+ raise max_exc
|
|
|
+
|
|
|
+ # If task was executed eagerly using apply(),
|
|
|
+ # then the retry must also be executed eagerly.
|
|
|
+ if kwargs.get("task_is_eager", False):
|
|
|
+ result = self.apply(args=args, kwargs=kwargs, **options)
|
|
|
+ if isinstance(result, EagerResult):
|
|
|
+ return result.get() # propagates exceptions.
|
|
|
+ return result
|
|
|
|
|
|
- """
|
|
|
- return apply(self, args, kwargs, **options)
|
|
|
+ self.apply_async(args=args, kwargs=kwargs, **options)
|
|
|
+
|
|
|
+ if throw:
|
|
|
+ message = "Retry in %d seconds." % options["countdown"]
|
|
|
+ raise RetryTaskError(message, exc)
|
|
|
|
|
|
- @classmethod
|
|
|
- def AsyncResult(self, task_id):
|
|
|
- """Get AsyncResult instance for this kind of task.
|
|
|
+ @classmethod
|
|
|
+ def apply(self, args=None, kwargs=None, **options):
|
|
|
+ """Execute this task locally, by blocking until the task
|
|
|
+ has finished executing.
|
|
|
|
|
|
- :param task_id: Task id to get result for.
|
|
|
+ :param args: positional arguments passed on to the task.
|
|
|
+ :param kwargs: keyword arguments passed on to the task.
|
|
|
+ :keyword throw: Re-raise task exceptions. Defaults to
|
|
|
+ the ``CELERY_EAGER_PROPAGATES_EXCEPTIONS`` setting.
|
|
|
+
|
|
|
+ :rtype :class:`celery.result.EagerResult`:
|
|
|
+
|
|
|
+ See :func:`celery.execute.apply`.
|
|
|
+
|
|
|
+ """
|
|
|
+ return apply(self, args, kwargs, **options)
|
|
|
+
|
|
|
+ @classmethod
|
|
|
+ def AsyncResult(self, task_id):
|
|
|
+ """Get AsyncResult instance for this kind of task.
|
|
|
+
|
|
|
+ :param task_id: Task id to get result for.
|
|
|
+
|
|
|
+ """
|
|
|
+ return BaseAsyncResult(task_id, backend=self.backend)
|
|
|
+
|
|
|
+ def on_retry(self, exc, task_id, args, kwargs, einfo=None):
|
|
|
+ """Retry handler.
|
|
|
+
|
|
|
+ This is run by the worker when the task is to be retried.
|
|
|
+
|
|
|
+ :param exc: The exception sent to :meth:`retry`.
|
|
|
+ :param task_id: Unique id of the retried task.
|
|
|
+ :param args: Original arguments for the retried task.
|
|
|
+ :param kwargs: Original keyword arguments for the retried task.
|
|
|
|
|
|
- """
|
|
|
- return BaseAsyncResult(task_id, backend=self.backend)
|
|
|
+ :keyword einfo: :class:`~celery.datastructures.ExceptionInfo`
|
|
|
+ instance, containing the traceback.
|
|
|
|
|
|
- def on_retry(self, exc, task_id, args, kwargs, einfo=None):
|
|
|
- """Retry handler.
|
|
|
+ The return value of this handler is ignored.
|
|
|
|
|
|
- This is run by the worker when the task is to be retried.
|
|
|
+ """
|
|
|
+ pass
|
|
|
|
|
|
- :param exc: The exception sent to :meth:`retry`.
|
|
|
- :param task_id: Unique id of the retried task.
|
|
|
- :param args: Original arguments for the retried task.
|
|
|
- :param kwargs: Original keyword arguments for the retried task.
|
|
|
+ def after_return(self, status, retval, task_id, args,
|
|
|
+ kwargs, einfo=None):
|
|
|
+ """Handler called after the task returns.
|
|
|
|
|
|
- :keyword einfo: :class:`~celery.datastructures.ExceptionInfo` instance,
|
|
|
- containing the traceback.
|
|
|
+ :param status: Current task state.
|
|
|
+ :param retval: Task return value/exception.
|
|
|
+ :param task_id: Unique id of the task.
|
|
|
+ :param args: Original arguments for the task that failed.
|
|
|
+ :param kwargs: Original keyword arguments for the task
|
|
|
+ that failed.
|
|
|
|
|
|
- The return value of this handler is ignored.
|
|
|
+ :keyword einfo: :class:`~celery.datastructures.ExceptionInfo`
|
|
|
+ instance, containing the traceback (if any).
|
|
|
|
|
|
- """
|
|
|
- pass
|
|
|
+ The return value of this handler is ignored.
|
|
|
|
|
|
- def after_return(self, status, retval, task_id, args, kwargs, einfo=None):
|
|
|
- """Handler called after the task returns.
|
|
|
+ """
|
|
|
+ pass
|
|
|
|
|
|
- :param status: Current task state.
|
|
|
- :param retval: Task return value/exception.
|
|
|
- :param task_id: Unique id of the task.
|
|
|
- :param args: Original arguments for the task that failed.
|
|
|
- :param kwargs: Original keyword arguments for the task that failed.
|
|
|
+ def on_failure(self, exc, task_id, args, kwargs, einfo=None):
|
|
|
+ """Error handler.
|
|
|
|
|
|
- :keyword einfo: :class:`~celery.datastructures.ExceptionInfo` instance,
|
|
|
- containing the traceback (if any).
|
|
|
+ This is run by the worker when the task fails.
|
|
|
|
|
|
- The return value of this handler is ignored.
|
|
|
+ :param exc: The exception raised by the task.
|
|
|
+ :param task_id: Unique id of the failed task.
|
|
|
+ :param args: Original arguments for the task that failed.
|
|
|
+ :param kwargs: Original keyword arguments for the task
|
|
|
+ that failed.
|
|
|
|
|
|
- """
|
|
|
- pass
|
|
|
+ :keyword einfo: :class:`~celery.datastructures.ExceptionInfo`
|
|
|
+ instance, containing the traceback.
|
|
|
|
|
|
- def on_failure(self, exc, task_id, args, kwargs, einfo=None):
|
|
|
- """Error handler.
|
|
|
+ The return value of this handler is ignored.
|
|
|
|
|
|
- This is run by the worker when the task fails.
|
|
|
+ """
|
|
|
+ pass
|
|
|
|
|
|
- :param exc: The exception raised by the task.
|
|
|
- :param task_id: Unique id of the failed task.
|
|
|
- :param args: Original arguments for the task that failed.
|
|
|
- :param kwargs: Original keyword arguments for the task that failed.
|
|
|
+ def on_success(self, retval, task_id, args, kwargs):
|
|
|
+ """Success handler.
|
|
|
|
|
|
- :keyword einfo: :class:`~celery.datastructures.ExceptionInfo` instance,
|
|
|
- containing the traceback.
|
|
|
+ Run by the worker if the task executes successfully.
|
|
|
|
|
|
- The return value of this handler is ignored.
|
|
|
+ :param retval: The return value of the task.
|
|
|
+ :param task_id: Unique id of the executed task.
|
|
|
+ :param args: Original arguments for the executed task.
|
|
|
+ :param kwargs: Original keyword arguments for the executed task.
|
|
|
|
|
|
- """
|
|
|
- pass
|
|
|
+ The return value of this handler is ignored.
|
|
|
|
|
|
- def on_success(self, retval, task_id, args, kwargs):
|
|
|
- """Success handler.
|
|
|
+ """
|
|
|
+ pass
|
|
|
|
|
|
- Run by the worker if the task executes successfully.
|
|
|
+ def execute(self, wrapper, pool, loglevel, logfile):
|
|
|
+ """The method the worker calls to execute the task.
|
|
|
|
|
|
- :param retval: The return value of the task.
|
|
|
- :param task_id: Unique id of the executed task.
|
|
|
- :param args: Original arguments for the executed task.
|
|
|
- :param kwargs: Original keyword arguments for the executed task.
|
|
|
+ :param wrapper: A :class:`~celery.worker.job.TaskRequest`.
|
|
|
+ :param pool: A task pool.
|
|
|
+ :param loglevel: Current loglevel.
|
|
|
+ :param logfile: Name of the currently used logfile.
|
|
|
|
|
|
- The return value of this handler is ignored.
|
|
|
+ """
|
|
|
+ wrapper.execute_using_pool(pool, loglevel, logfile)
|
|
|
|
|
|
- """
|
|
|
- pass
|
|
|
-
|
|
|
- def execute(self, wrapper, pool, loglevel, logfile):
|
|
|
- """The method the worker calls to execute the task.
|
|
|
+ def __repr__(self):
|
|
|
+ """repr(task)"""
|
|
|
+ try:
|
|
|
+ kind = self.__class__.mro()[1].__name__
|
|
|
+ except (AttributeError, IndexError):
|
|
|
+ kind = "%s(Task)" % self.__class__.__name__
|
|
|
+ return "<%s: %s (%s)>" % (kind, self.name, self.type)
|
|
|
|
|
|
- :param wrapper: A :class:`~celery.worker.job.TaskRequest`.
|
|
|
- :param pool: A task pool.
|
|
|
- :param loglevel: Current loglevel.
|
|
|
- :param logfile: Name of the currently used logfile.
|
|
|
-
|
|
|
- """
|
|
|
- wrapper.execute_using_pool(pool, loglevel, logfile)
|
|
|
+ @classmethod
|
|
|
+ def subtask(cls, *args, **kwargs):
|
|
|
+ """Returns a :class:`~celery.task.sets.subtask` object for
|
|
|
+ this task that wraps arguments and execution options
|
|
|
+ for a single task invocation."""
|
|
|
+ return subtask(cls, *args, **kwargs)
|
|
|
|
|
|
- def __repr__(self):
|
|
|
- """repr(task)"""
|
|
|
- try:
|
|
|
- kind = self.__class__.mro()[1].__name__
|
|
|
- except (AttributeError, IndexError):
|
|
|
- kind = "%s(Task)" % self.__class__.__name__
|
|
|
- return "<%s: %s (%s)>" % (kind, self.name, self.type)
|
|
|
+ @property
|
|
|
+ def __name__(self):
|
|
|
+ return self.__class__.__name__
|
|
|
|
|
|
- @classmethod
|
|
|
- def subtask(cls, *args, **kwargs):
|
|
|
- """Returns a :class:`~celery.task.sets.subtask` object for
|
|
|
- this task that wraps arguments and execution options
|
|
|
- for a single task invocation."""
|
|
|
- return subtask(cls, *args, **kwargs)
|
|
|
+ return Task
|
|
|
|
|
|
- @property
|
|
|
- def __name__(self):
|
|
|
- return self.__class__.__name__
|
|
|
+Task = create_task_cls(default_app)
|
|
|
|
|
|
|
|
|
class PeriodicTask(Task):
|
|
@@ -633,7 +649,7 @@ class PeriodicTask(Task):
|
|
|
|
|
|
# For backward compatibility, add the periodic task to the
|
|
|
# configuration schedule instead.
|
|
|
- conf.CELERYBEAT_SCHEDULE[self.name] = {
|
|
|
+ default_app.conf.CELERYBEAT_SCHEDULE[self.name] = {
|
|
|
"task": self.name,
|
|
|
"schedule": self.run_every,
|
|
|
"args": (),
|