# result.py

  1. """
  2. celery.result
  3. =============
  4. Task results/state, and result groups.
  5. """
  6. from __future__ import absolute_import
  7. from __future__ import with_statement
  8. import time
  9. from copy import copy
  10. from itertools import imap
  11. from . import current_app
  12. from . import states
  13. from .app import app_or_default
  14. from .exceptions import TimeoutError
  15. from .registry import _unpickle_task
  16. from .utils.compat import OrderedDict
  17. __all__ = ["BaseAsyncResult", "AsyncResult", "ResultSet",
  18. "TaskSetResult", "EagerResult"]
  19. def _unpickle_result(task_id, task_name):
  20. return _unpickle_task(task_name).AsyncResult(task_id)


class BaseAsyncResult(object):
    """Base class for pending result, supports custom task result backend.

    :param task_id: see :attr:`task_id`.
    :param backend: see :attr:`backend`.

    """

    #: Error raised for timeouts.
    TimeoutError = TimeoutError

    #: The task uuid.
    task_id = None

    #: The task result backend to use.
    backend = None

    def __init__(self, task_id, backend, task_name=None, app=None):
        self.app = app_or_default(app)
        self.task_id = task_id
        self.backend = backend
        self.task_name = task_name

    def forget(self):
        """Forget about (and possibly remove the result of) this task."""
        self.backend.forget(self.task_id)

    def revoke(self, connection=None, connect_timeout=None):
        """Send revoke signal to all workers.

        Any worker receiving the task, or having reserved the
        task, *must* ignore it.

        """
        self.app.control.revoke(self.task_id, connection=connection,
                                connect_timeout=connect_timeout)

    def get(self, timeout=None, propagate=True, interval=0.5):
        """Wait until task is ready, and return its result.

        .. warning::

            Waiting for tasks within a task may lead to deadlocks.
            Please read :ref:`task-synchronous-subtasks`.

        :keyword timeout: How long to wait, in seconds, before the
                          operation times out.
        :keyword propagate: Re-raise exception if the task failed.
        :keyword interval: Time to wait (in seconds) before retrying to
            retrieve the result.  Note that this does not have any effect
            when using the AMQP result store backend, as it does not
            use polling.

        :raises celery.exceptions.TimeoutError: if `timeout` is not
            :const:`None` and the result does not arrive within `timeout`
            seconds.

        If the remote call raised an exception then that exception will
        be re-raised.

        """
        return self.backend.wait_for(self.task_id, timeout=timeout,
                                     propagate=propagate,
                                     interval=interval)
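
    # Usage sketch (illustrative only; ``add`` is a hypothetical task and
    # not part of this module)::
    #
    #     >>> result = add.delay(2, 2)
    #     >>> result.get(timeout=10)       # blocks until ready; may raise
    #     4                                # TimeoutError or the task's error
    #     >>> result.get(propagate=False)  # on failure, returns the exception
    #     ...                              # instance instead of re-raising it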

    def wait(self, *args, **kwargs):
        """Deprecated alias to :meth:`get`."""
        return self.get(*args, **kwargs)

    def ready(self):
        """Returns :const:`True` if the task has been executed.

        If the task is still running, pending, or is waiting
        for retry then :const:`False` is returned.

        """
        return self.status in self.backend.READY_STATES

    def successful(self):
        """Returns :const:`True` if the task executed successfully."""
        return self.status == states.SUCCESS

    def failed(self):
        """Returns :const:`True` if the task failed."""
        return self.status == states.FAILURE

    def __str__(self):
        """`str(self) -> self.task_id`"""
        return self.task_id

    def __hash__(self):
        """`hash(self) -> hash(self.task_id)`"""
        return hash(self.task_id)

    def __repr__(self):
        return "<AsyncResult: %s>" % self.task_id

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.task_id == other.task_id
        return other == self.task_id

    def __copy__(self):
        return self.__class__(self.task_id, backend=self.backend)

    def __reduce__(self):
        if self.task_name:
            return (_unpickle_result, (self.task_id, self.task_name))
        else:
            return (self.__class__, (self.task_id, self.backend,
                                     None, self.app))

    @property
    def result(self):
        """When the task has been executed, this contains the return value.

        If the task raised an exception, this will be the exception
        instance.

        """
        return self.backend.get_result(self.task_id)

    @property
    def info(self):
        """Get state metadata.  Alias to :meth:`result`."""
        return self.result

    @property
    def traceback(self):
        """Get the traceback of a failed task."""
        return self.backend.get_traceback(self.task_id)

    @property
    def state(self):
        """The task's current state.

        Possible values include:

            *PENDING*

                The task is waiting for execution.

            *STARTED*

                The task has been started.

            *RETRY*

                The task is to be retried, possibly because of failure.

            *FAILURE*

                The task raised an exception, or has exceeded the retry
                limit.  The :attr:`result` attribute then contains the
                exception raised by the task.

            *SUCCESS*

                The task executed successfully.  The :attr:`result` attribute
                then contains the task's return value.

        """
        return self.backend.get_status(self.task_id)

    @property
    def status(self):
        """Deprecated alias of :attr:`state`."""
        return self.state
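
    # Quick state checks (illustrative; ``result`` stands for any instance
    # of this class)::
    #
    #     >>> result.state         # e.g. "PENDING", "STARTED", "SUCCESS"
    #     >>> result.ready()       # True once the task has finished
    #     >>> result.successful()  # True only if it finished without error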


class AsyncResult(BaseAsyncResult):
    """Pending task result using the default backend.

    :param task_id: The task uuid.

    """

    #: Task result store backend to use.
    backend = None

    def __init__(self, task_id, backend=None, task_name=None, app=None):
        app = app_or_default(app)
        backend = backend or app.backend
        super(AsyncResult, self).__init__(task_id, backend,
                                          task_name=task_name, app=app)


class ResultSet(object):
    """Working with more than one result.

    :param results: List of result instances.

    """

    #: List of results in the set.
    results = None

    def __init__(self, results, app=None, **kwargs):
        self.app = app_or_default(app)
        self.results = results

    def add(self, result):
        """Add :class:`AsyncResult` as a new member of the set.

        Does nothing if the result is already a member.

        """
        if result not in self.results:
            self.results.append(result)

    def remove(self, result):
        """Removes result from the set; it must be a member.

        :raises KeyError: if the result is not a member.

        """
        if isinstance(result, basestring):
            result = AsyncResult(result)
        try:
            self.results.remove(result)
        except ValueError:
            raise KeyError(result)

    def discard(self, result):
        """Remove result from the set if it is a member.

        If it is not a member, do nothing.

        """
        try:
            self.remove(result)
        except KeyError:
            pass

    def update(self, results):
        """Update set with the union of itself and an iterable with
        results."""
        self.results.extend(r for r in results if r not in self.results)

    def clear(self):
        """Remove all results from this set."""
        self.results[:] = []  # don't create a new list.
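
    # Membership sketch (illustrative; the task ids are made up)::
    #
    #     >>> rs = ResultSet([AsyncResult("id-1"), AsyncResult("id-2")])
    #     >>> rs.add(AsyncResult("id-3"))   # no-op if already a member
    #     >>> rs.discard("id-3")            # accepts a task id or a result
    #     >>> rs.total
    #     2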

    def successful(self):
        """Were all of the tasks successful?

        :returns: :const:`True` if all of the tasks finished
            successfully (i.e. did not raise an exception).

        """
        return all(result.successful() for result in self.results)

    def failed(self):
        """Did any of the tasks fail?

        :returns: :const:`True` if any of the tasks failed
            (i.e., raised an exception).

        """
        return any(result.failed() for result in self.results)

    def waiting(self):
        """Are any of the tasks incomplete?

        :returns: :const:`True` if any of the tasks is still
            waiting for execution.

        """
        return any(not result.ready() for result in self.results)

    def ready(self):
        """Did all of the tasks complete (either by success or failure)?

        :returns: :const:`True` if all of the tasks have been
            executed.

        """
        return all(result.ready() for result in self.results)

    def completed_count(self):
        """Task completion count.

        :returns: the number of tasks completed successfully.

        """
        return sum(imap(int, (result.successful() for result in self.results)))

    def forget(self):
        """Forget about (and possibly remove the result of) all the tasks."""
        for result in self.results:
            result.forget()

    def revoke(self, connection=None, connect_timeout=None):
        """Revoke all tasks in the set."""
        with self.app.default_connection(connection, connect_timeout) as conn:
            for result in self.results:
                result.revoke(connection=conn)

    def __iter__(self):
        return self.iterate()

    def __getitem__(self, index):
        """`res[i] -> res.results[i]`"""
        return self.results[index]

    def iterate(self, timeout=None, propagate=True, interval=0.5):
        """Iterate over the return values of the tasks as they finish
        one by one.

        :raises: The exception if any of the tasks raised an exception.

        """
        elapsed = 0.0
        results = OrderedDict((result.task_id, copy(result))
                              for result in self.results)

        while results:
            removed = set()
            for task_id, result in results.iteritems():
                if result.ready():
                    yield result.get(timeout=timeout and timeout - elapsed,
                                     propagate=propagate)
                    removed.add(task_id)
                else:
                    if result.backend.subpolling_interval:
                        time.sleep(result.backend.subpolling_interval)
            for task_id in removed:
                results.pop(task_id, None)
            time.sleep(interval)
            elapsed += interval
            if timeout and elapsed >= timeout:
                raise TimeoutError("The operation timed out")
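
    # Usage sketch (illustrative; ``rs`` is a ResultSet as built above)::
    #
    #     >>> for value in rs.iterate(timeout=30):
    #     ...     print value   # return values, yielded as tasks become ready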

    def join(self, timeout=None, propagate=True, interval=0.5):
        """Gathers the results of all tasks as a list in order.

        .. note::

            This can be an expensive operation for result store
            backends that must resort to polling (e.g. database).

            You should consider using :meth:`join_native` if your backend
            supports it.

        .. warning::

            Waiting for tasks within a task may lead to deadlocks.
            Please see :ref:`task-synchronous-subtasks`.

        :keyword timeout: The number of seconds to wait for results before
                          the operation times out.
        :keyword propagate: If any of the tasks raises an exception, the
                            exception will be re-raised.
        :keyword interval: Time to wait (in seconds) before retrying to
                           retrieve a result from the set.  Note that this
                           does not have any effect when using the AMQP
                           result store backend, as it does not use polling.

        :raises celery.exceptions.TimeoutError: if `timeout` is not
            :const:`None` and the operation takes longer than `timeout`
            seconds.

        """
        time_start = time.time()
        remaining = None

        results = []
        for result in self.results:
            remaining = None
            if timeout:
                remaining = timeout - (time.time() - time_start)
                if remaining <= 0.0:
                    raise TimeoutError("join operation timed out")
            results.append(result.wait(timeout=remaining,
                                       propagate=propagate,
                                       interval=interval))
        return results
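
    # Usage sketch (illustrative; ``rs`` is a ResultSet)::
    #
    #     >>> rs.join(timeout=60)        # list of return values, in order
    #     >>> rs.join(propagate=False)   # failed tasks contribute their
    #     ...                            # exception instance instead of
    #     ...                            # re-raising it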

    def iter_native(self, timeout=None, interval=None):
        """Backend optimized version of :meth:`iterate`.

        .. versionadded:: 2.2

        Note that this does not support collecting the results
        for different task types using different backends.

        This is currently only supported by the AMQP, Redis and cache
        result backends.

        """
        backend = self.results[0].backend
        ids = [result.task_id for result in self.results]
        return backend.get_many(ids, timeout=timeout, interval=interval)
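
    # Consumption sketch (illustrative; ``rs`` is a ResultSet).  The backend
    # yields ``(task_id, meta)`` pairs, where ``meta`` is the state document
    # stored by the backend and includes a "result" key, as used by
    # :meth:`join_native` below::
    #
    #     >>> for task_id, meta in rs.iter_native(timeout=30):
    #     ...     print task_id, meta["result"]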

    def join_native(self, timeout=None, propagate=True, interval=0.5):
        """Backend optimized version of :meth:`join`.

        .. versionadded:: 2.2

        Note that this does not support collecting the results
        for different task types using different backends.

        This is currently only supported by the AMQP, Redis and cache
        result backends.

        """
        results = self.results
        acc = [None for _ in xrange(self.total)]
        for task_id, meta in self.iter_native(timeout=timeout,
                                              interval=interval):
            acc[results.index(task_id)] = meta["result"]
        return acc

    @property
    def total(self):
        """Total number of tasks in the set."""
        return len(self.results)

    @property
    def subtasks(self):
        """Deprecated alias to :attr:`results`."""
        return self.results


class TaskSetResult(ResultSet):
    """An instance of this class is returned by
    `TaskSet`'s :meth:`~celery.task.TaskSet.apply_async` method.

    It enables inspection of the tasks' states and return values
    as a single entity.

    :param taskset_id: The id of the taskset.
    :param results: List of result instances.

    """

    #: The UUID of the taskset.
    taskset_id = None

    #: List/iterator of results in the taskset.
    results = None

    def __init__(self, taskset_id, results=None, **kwargs):
        self.taskset_id = taskset_id

        # XXX previously the "results" arg was named "subtasks".
        if "subtasks" in kwargs:
            results = kwargs["subtasks"]
        super(TaskSetResult, self).__init__(results, **kwargs)

    def save(self, backend=None):
        """Save taskset result for later retrieval using :meth:`restore`.

        Example::

            >>> result.save()
            >>> result = TaskSetResult.restore(taskset_id)

        """
        return (backend or self.app.backend).save_taskset(self.taskset_id,
                                                          self)

    def delete(self, backend=None):
        """Remove this result if it was previously saved."""
        (backend or self.app.backend).delete_taskset(self.taskset_id)

    @classmethod
    def restore(cls, taskset_id, backend=None):
        """Restore a previously saved taskset result."""
        return (backend or current_app.backend).restore_taskset(taskset_id)

    def itersubtasks(self):
        """Deprecated.  Use ``iter(self.results)`` instead."""
        return iter(self.results)

    def __reduce__(self):
        return (self.__class__, (self.taskset_id, self.results))


class EagerResult(BaseAsyncResult):
    """Result that we know has already been executed."""
    TimeoutError = TimeoutError

    def __init__(self, task_id, ret_value, state, traceback=None):
        self.task_id = task_id
        self._result = ret_value
        self._state = state
        self._traceback = traceback

    def __reduce__(self):
        return (self.__class__, (self.task_id, self._result,
                                 self._state, self._traceback))

    def __copy__(self):
        cls, args = self.__reduce__()
        return cls(*args)

    def successful(self):
        """Returns :const:`True` if the task executed without failure."""
        return self.state == states.SUCCESS

    def ready(self):
        """Returns :const:`True` if the task has been executed."""
        return True

    def get(self, timeout=None, propagate=True, **kwargs):
        """Wait until the task has been executed and return its result."""
        if self.state == states.SUCCESS:
            return self.result
        elif self.state in states.PROPAGATE_STATES:
            if propagate:
                raise self.result
            return self.result
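
    # Usage sketch (illustrative; ``add`` is a hypothetical task).  Calling a
    # task with ``apply()`` runs it locally and returns an EagerResult, so
    # ``get()`` returns immediately::
    #
    #     >>> res = add.apply(args=(2, 2))
    #     >>> res.get()
    #     4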

    def revoke(self):
        self._state = states.REVOKED

    def __repr__(self):
        return "<EagerResult: %s>" % self.task_id

    @property
    def result(self):
        """The task's return value."""
        return self._result

    @property
    def state(self):
        """The task's state."""
        return self._state

    @property
    def traceback(self):
        """The traceback if the task failed."""
        return self._traceback

    @property
    def status(self):
        """The task's status (alias to :attr:`state`)."""
        return self._state