# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import with_statement

import socket
import threading
import time

from itertools import count

from kombu.entity import Exchange, Queue
from kombu.messaging import Consumer, Producer

from .. import states
from ..exceptions import TimeoutError
from .base import BaseDictBackend


class BacklogLimitExceeded(Exception):
    """Too much state history to fast-forward."""


def repair_uuid(s):
    # Historically the dashes in UUIDs are removed from AMQ entity names,
    # but there is no known reason to do so.  Hopefully we'll be able to
    # fix this in v3.0.
    return "%s-%s-%s-%s-%s" % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
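
# Illustrative example (hypothetical task id), showing how the dashes are
# reinserted into the dashless routing key used for the AMQP entities:
#
#     >>> repair_uuid("4c03ea0512f94a2b9e1a69fbbd79a267")
#     '4c03ea05-12f9-4a2b-9e1a-69fbbd79a267'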


class AMQPBackend(BaseDictBackend):
    """Publishes results by sending messages."""
    Exchange = Exchange
    Queue = Queue
    Consumer = Consumer
    Producer = Producer

    BacklogLimitExceeded = BacklogLimitExceeded

    supports_native_join = True

    def __init__(self, connection=None, exchange=None, exchange_type=None,
            persistent=None, serializer=None, auto_delete=True,
            **kwargs):
        super(AMQPBackend, self).__init__(**kwargs)
        conf = self.app.conf
        self._connection = connection
        self.queue_arguments = {}
        self.persistent = (conf.CELERY_RESULT_PERSISTENT if persistent is None
                           else persistent)
        # Base the delivery mode on the resolved self.persistent value,
        # not on the raw argument (which may be None when the setting is used).
        delivery_mode = self.persistent and "persistent" or "transient"
        exchange = exchange or conf.CELERY_RESULT_EXCHANGE
        exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
        self.exchange = self.Exchange(name=exchange,
                                      type=exchange_type,
                                      delivery_mode=delivery_mode,
                                      durable=self.persistent,
                                      auto_delete=auto_delete)
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        self.auto_delete = auto_delete

        # AMQP_TASK_RESULT_EXPIRES setting is deprecated and will be
        # removed in version 3.0.
        dexpires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES

        self.expires = None
        if "expires" in kwargs:
            if kwargs["expires"] is not None:
                self.expires = self.prepare_expires(kwargs["expires"])
        else:
            self.expires = self.prepare_expires(dexpires)

        if self.expires:
            self.queue_arguments["x-expires"] = int(self.expires * 1000)
        self.mutex = threading.Lock()
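
    # Illustrative configuration sketch (setting names are taken from this
    # module; the values shown are assumptions, not defaults asserted here):
    #
    #     CELERY_RESULT_EXCHANGE = "celeryresults"
    #     CELERY_AMQP_TASK_RESULT_EXPIRES = 3600  # seconds
    #
    # With expires=3600 each per-task result queue is declared with
    # queue_arguments={"x-expires": 3600000} (milliseconds), so the broker
    # removes the queue after an hour of disuse.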

    def _create_binding(self, task_id):
        name = task_id.replace("-", "")
        return self.Queue(name=name,
                          exchange=self.exchange,
                          routing_key=name,
                          durable=self.persistent,
                          auto_delete=self.auto_delete,
                          queue_arguments=self.queue_arguments)

    def _create_producer(self, task_id, channel):
        self._create_binding(task_id)(channel).declare()
        return self.Producer(channel, exchange=self.exchange,
                             routing_key=task_id.replace("-", ""),
                             serializer=self.serializer)

    def _create_consumer(self, bindings, channel):
        return self.Consumer(channel, bindings, no_ack=True)

    def _publish_result(self, connection, task_id, meta):
        # cache single channel
        if connection._default_channel is not None and \
                connection._default_channel.connection is None:
            connection.maybe_close_channel(connection._default_channel)
        channel = connection.default_channel

        self._create_producer(task_id, channel).publish(meta)

    def revive(self, channel):
        pass

    def _store_result(self, task_id, result, status, traceback=None,
            max_retries=20, interval_start=0, interval_step=1,
            interval_max=1):
        """Send task return value and status."""
        with self.mutex:
            with self.app.pool.acquire(block=True) as conn:

                def errback(error, delay):
                    print("Couldn't send result for %r: %r. Retry in %rs." % (
                            task_id, error, delay))

                send = conn.ensure(self, self._publish_result,
                                   max_retries=max_retries,
                                   errback=errback,
                                   interval_start=interval_start,
                                   interval_step=interval_step,
                                   interval_max=interval_max)
                send(conn, task_id, {"task_id": task_id, "status": status,
                                     "result": self.encode_result(result, status),
                                     "traceback": traceback})
        return result
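
    # The meta published for each result is a plain dict; an illustrative
    # SUCCESS payload (the values below are examples only) looks like:
    #
    #     {"task_id": "4c03ea05-12f9-4a2b-9e1a-69fbbd79a267",
    #      "status": "SUCCESS", "result": 4, "traceback": None}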

    def get_task_meta(self, task_id, cache=True):
        return self.poll(task_id)

    def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
            **kwargs):
        cached_meta = self._cache.get(task_id)

        if cache and cached_meta and \
                cached_meta["status"] in states.READY_STATES:
            meta = cached_meta
        else:
            try:
                meta = self.consume(task_id, timeout=timeout)
            except socket.timeout:
                raise TimeoutError("The operation timed out.")

        state = meta["status"]
        if state == states.SUCCESS:
            return meta["result"]
        elif state in states.PROPAGATE_STATES:
            if propagate:
                raise self.exception_to_python(meta["result"])
            return meta["result"]
        else:
            return self.wait_for(task_id, timeout, cache)

    def poll(self, task_id, backlog_limit=100):
        with self.app.pool.acquire_channel(block=True) as (_, channel):
            binding = self._create_binding(task_id)(channel)
            binding.declare()
            latest, acc = None, None
            for i in count():  # fast-forward
                latest, acc = acc, binding.get(no_ack=True)
                if not acc:
                    break
                if i > backlog_limit:
                    raise self.BacklogLimitExceeded(task_id)
            if latest:
                payload = self._cache[task_id] = latest.payload
                return payload
            elif task_id in self._cache:  # use previously received state.
                return self._cache[task_id]
            return {"status": states.PENDING, "result": None}
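
    # Note on the fast-forward above: poll() drains the per-task result
    # queue with no_ack=True and keeps only the most recent message, so
    # intermediate states are discarded once a newer one has been seen;
    # when the queue is empty it falls back to the cache, or to PENDING.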

    def drain_events(self, connection, consumer, timeout=None, now=time.time):
        wait = connection.drain_events
        results = {}

        def callback(meta, message):
            if meta["status"] in states.READY_STATES:
                uuid = repair_uuid(message.delivery_info["routing_key"])
                results[uuid] = meta
        consumer.callbacks[:] = [callback]

        time_start = now()
        while 1:
            # Total time spent may exceed a single call to wait()
            if timeout and now() - time_start >= timeout:
                raise socket.timeout()
            wait(timeout=timeout)
            if results:  # got event on the wanted channel.
                break
        self._cache.update(results)
        return results

    def consume(self, task_id, timeout=None):
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            binding = self._create_binding(task_id)
            with self._create_consumer(binding, channel) as consumer:
                return self.drain_events(conn, consumer, timeout).values()[0]

    def get_many(self, task_ids, timeout=None, **kwargs):
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            ids = set(task_ids)
            cached_ids = set()
            for task_id in ids:
                try:
                    cached = self._cache[task_id]
                except KeyError:
                    pass
                else:
                    if cached["status"] in states.READY_STATES:
                        yield task_id, cached
                        cached_ids.add(task_id)

            ids ^= cached_ids
            bindings = [self._create_binding(task_id) for task_id in task_ids]
            with self._create_consumer(bindings, channel) as consumer:
                while ids:
                    r = self.drain_events(conn, consumer, timeout)
                    ids ^= set(r.keys())
                    for ready_id, ready_meta in r.iteritems():
                        yield ready_id, ready_meta
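
    # Usage sketch (assumes a configured app and previously stored results;
    # the task ids below are hypothetical):
    #
    #     backend = AMQPBackend(app=app)
    #     for task_id, meta in backend.get_many(["id1", "id2"], timeout=5):
    #         print("%s -> %s" % (task_id, meta["status"]))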

    def reload_task_result(self, task_id):
        raise NotImplementedError(
                "reload_task_result is not supported by this backend.")

    def reload_taskset_result(self, task_id):
        """Reload taskset result, even if it has been previously fetched."""
        raise NotImplementedError(
                "reload_taskset_result is not supported by this backend.")

    def save_taskset(self, taskset_id, result):
        raise NotImplementedError(
                "save_taskset is not supported by this backend.")

    def restore_taskset(self, taskset_id, cache=True):
        raise NotImplementedError(
                "restore_taskset is not supported by this backend.")

    def delete_taskset(self, taskset_id):
        raise NotImplementedError(
                "delete_taskset is not supported by this backend.")

    def __reduce__(self, args=(), kwargs={}):
        # Copy instead of mutating the shared default kwargs dict.
        kwargs = dict(kwargs,
                      connection=self._connection,
                      exchange=self.exchange.name,
                      exchange_type=self.exchange.type,
                      persistent=self.persistent,
                      serializer=self.serializer,
                      auto_delete=self.auto_delete,
                      expires=self.expires)
        return super(AMQPBackend, self).__reduce__(args, kwargs)