amqp.py

# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import with_statement

import socket
import threading
import time

from kombu.entity import Exchange, Queue
from kombu.messaging import Consumer, Producer

from celery import states
from celery.exceptions import TimeoutError
from celery.utils.log import get_logger

from .base import BaseDictBackend

logger = get_logger(__name__)


class BacklogLimitExceeded(Exception):
    """Too much state history to fast-forward."""


def repair_uuid(s):
    # Historically the dashes in UUIDs are removed from AMQ entity names,
    # but there is no known reason to do so.  Hopefully we'll be able to
    # fix this in v3.0.
    return "%s-%s-%s-%s-%s" % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])


class AMQPBackend(BaseDictBackend):
    """Publishes results by sending messages."""
    Exchange = Exchange
    Queue = Queue
    Consumer = Consumer
    Producer = Producer

    BacklogLimitExceeded = BacklogLimitExceeded

    supports_native_join = True

    retry_policy = {
        "max_retries": 20,
        "interval_start": 0,
        "interval_step": 1,
        "interval_max": 1,
    }

    def __init__(self, connection=None, exchange=None, exchange_type=None,
                 persistent=None, serializer=None, auto_delete=True,
                 **kwargs):
        super(AMQPBackend, self).__init__(**kwargs)
        conf = self.app.conf
        self._connection = connection
        self.queue_arguments = {}
        self.persistent = (conf.CELERY_RESULT_PERSISTENT if persistent is None
                           else persistent)
        delivery_mode = "persistent" if self.persistent else "transient"
        exchange = exchange or conf.CELERY_RESULT_EXCHANGE
        exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
        self.exchange = self.Exchange(name=exchange,
                                      type=exchange_type,
                                      delivery_mode=delivery_mode,
                                      durable=self.persistent,
                                      auto_delete=False)
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        self.auto_delete = auto_delete

        # The AMQP_TASK_RESULT_EXPIRES setting is deprecated and will be
        # removed in version 3.0.
        dexpires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES

        self.expires = None
        if "expires" in kwargs:
            if kwargs["expires"] is not None:
                self.expires = self.prepare_expires(kwargs["expires"])
        else:
            self.expires = self.prepare_expires(dexpires)

        if self.expires:
            self.queue_arguments["x-expires"] = int(self.expires * 1000)
        self.mutex = threading.Lock()

    def _create_binding(self, task_id):
        # Queue name and routing key are the task id with the dashes
        # stripped (see repair_uuid above for the reverse mapping).
        name = task_id.replace("-", "")
        return self.Queue(name=name,
                          exchange=self.exchange,
                          routing_key=name,
                          durable=self.persistent,
                          auto_delete=self.auto_delete,
                          queue_arguments=self.queue_arguments)

    def revive(self, channel):
        pass

    def _store_result(self, task_id, result, status, traceback=None):
        """Send task return value and status."""
        with self.mutex:
            with self.app.amqp.producer_pool.acquire(block=True) as pub:
                pub.publish({"task_id": task_id, "status": status,
                             "result": self.encode_result(result, status),
                             "traceback": traceback,
                             "children": self.current_task_children()},
                            exchange=self.exchange,
                            routing_key=task_id.replace("-", ""),
                            serializer=self.serializer,
                            retry=True, retry_policy=self.retry_policy,
                            declare=[self._create_binding(task_id)])
        return result
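
    # The message body published above has this shape (illustrative
    # values; "result" has already been run through encode_result()):
    #
    #     {"task_id": "6fa459ea-ee8a-3ca4-894e-db77e160355e",
    #      "status": "SUCCESS", "result": 42,
    #      "traceback": None, "children": []}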

    def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
                 **kwargs):
        cached_meta = self._cache.get(task_id)
        if cache and cached_meta and \
                cached_meta["status"] in states.READY_STATES:
            meta = cached_meta
        else:
            try:
                meta = self.consume(task_id, timeout=timeout)
            except socket.timeout:
                raise TimeoutError("The operation timed out.")

        state = meta["status"]
        if state == states.SUCCESS:
            return meta["result"]
        elif state in states.PROPAGATE_STATES:
            if propagate:
                raise self.exception_to_python(meta["result"])
            return meta["result"]
        else:
            # Not ready yet: keep waiting, preserving the caller's
            # propagate setting.
            return self.wait_for(task_id, timeout, cache, propagate)

    def get_task_meta(self, task_id, backlog_limit=1000):
        # Poll using basic_get: drain any backlog of state messages and
        # report only the most recent one.
        with self.app.pool.acquire_channel(block=True) as (_, channel):
            binding = self._create_binding(task_id)(channel)
            binding.declare()

            latest, acc = None, None
            for i in xrange(backlog_limit):
                latest, acc = acc, binding.get(no_ack=True)
                if not acc:  # no more messages
                    break
            else:
                raise self.BacklogLimitExceeded(task_id)

            if latest:
                # new state to report
                payload = self._cache[task_id] = latest.payload
                return payload
            else:
                # no new state, use previous
                try:
                    return self._cache[task_id]
                except KeyError:
                    # result probably pending.
                    return {"status": states.PENDING, "result": None}
    poll = get_task_meta  # XXX compat
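
    # Non-blocking state check (hypothetical task id; returns the cached
    # meta dict, or a PENDING stub when no state message has arrived):
    #
    #     >>> backend.get_task_meta("6fa459ea-ee8a-3ca4-894e-db77e160355e")
    #     {"status": "PENDING", "result": None}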

    def drain_events(self, connection, consumer, timeout=None, now=time.time):
        wait = connection.drain_events
        results = {}

        def callback(meta, message):
            if meta["status"] in states.READY_STATES:
                uuid = repair_uuid(message.delivery_info["routing_key"])
                results[uuid] = meta

        consumer.callbacks[:] = [callback]
        time_start = now()

        while 1:
            # Total time spent may exceed a single call to wait()
            if timeout and now() - time_start >= timeout:
                raise socket.timeout()
            wait(timeout=timeout)
            if results:  # got event on the wanted channel.
                break
        self._cache.update(results)
        return results

    def consume(self, task_id, timeout=None):
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            binding = self._create_binding(task_id)
            with self.Consumer(channel, binding, no_ack=True) as consumer:
                return self.drain_events(conn, consumer, timeout).values()[0]

    def get_many(self, task_ids, timeout=None, **kwargs):
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            ids = set(task_ids)
            cached_ids = set()
            for task_id in ids:
                try:
                    cached = self._cache[task_id]
                except KeyError:
                    pass
                else:
                    if cached["status"] in states.READY_STATES:
                        yield task_id, cached
                        cached_ids.add(task_id)

            ids ^= cached_ids
            bindings = [self._create_binding(task_id) for task_id in task_ids]
            with self.Consumer(channel, bindings, no_ack=True) as consumer:
                while ids:
                    r = self.drain_events(conn, consumer, timeout)
                    ids ^= set(r)
                    for ready_id, ready_meta in r.iteritems():
                        yield ready_id, ready_meta
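
    # get_many() is a generator, yielding results as they arrive
    # (hypothetical task ids):
    #
    #     >>> for task_id, meta in backend.get_many([uuid1, uuid2],
    #     ...                                       timeout=10):
    #     ...     print(task_id, meta["status"])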

    def reload_task_result(self, task_id):
        raise NotImplementedError(
            "reload_task_result is not supported by this backend.")

    def reload_taskset_result(self, taskset_id):
        """Reload taskset result, even if it has been previously fetched."""
        raise NotImplementedError(
            "reload_taskset_result is not supported by this backend.")

    def save_taskset(self, taskset_id, result):
        raise NotImplementedError(
            "save_taskset is not supported by this backend.")

    def restore_taskset(self, taskset_id, cache=True):
        raise NotImplementedError(
            "restore_taskset is not supported by this backend.")

    def delete_taskset(self, taskset_id):
        raise NotImplementedError(
            "delete_taskset is not supported by this backend.")

    def __reduce__(self, args=(), kwargs={}):
        # Copy before updating so the shared default dict is not mutated.
        kwargs = dict(kwargs,
                      connection=self._connection,
                      exchange=self.exchange.name,
                      exchange_type=self.exchange.type,
                      persistent=self.persistent,
                      serializer=self.serializer,
                      auto_delete=self.auto_delete,
                      expires=self.expires)
        return super(AMQPBackend, self).__reduce__(args, kwargs)
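

if __name__ == "__main__":
    # Minimal usage sketch, not part of the backend itself.  Assumes a
    # reachable AMQP broker at the default URL, the app-based API
    # (celery.Celery), and a hypothetical task id.  Normally this
    # backend is selected via the CELERY_RESULT_BACKEND = "amqp"
    # setting rather than instantiated directly.
    from celery import Celery

    app = Celery(broker="amqp://guest:guest@localhost:5672//")
    backend = AMQPBackend(app=app)

    task_id = "6fa459ea-ee8a-3ca4-894e-db77e160355e"  # hypothetical
    backend.store_result(task_id, 42, states.SUCCESS)
    print(backend.wait_for(task_id, timeout=5))  # -> 42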