# -*- coding: utf-8 -*-
"""The old AMQP result backend, deprecated and replaced by the RPC backend."""
from __future__ import absolute_import, unicode_literals

import socket
from collections import deque
from operator import itemgetter

from kombu import Consumer, Exchange, Producer, Queue

from celery import states
from celery.exceptions import TimeoutError
from celery.five import monotonic, range
from celery.utils import deprecated
from celery.utils.log import get_logger

from .base import BaseBackend

__all__ = ('BacklogLimitExceeded', 'AMQPBackend')

logger = get_logger(__name__)
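
# A minimal usage sketch (an assumption for illustration: a RabbitMQ broker on
# localhost; this backend is normally selected through Celery configuration
# rather than imported directly):
#
#     from celery import Celery
#
#     app = Celery('proj',
#                  broker='amqp://guest@localhost//',
#                  backend='amqp://guest@localhost//')
#
# With a backend URL like this, Celery instantiates AMQPBackend and each
# task's result is published to a per-task reply queue declared below.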


class BacklogLimitExceeded(Exception):
    """Too much state history to fast-forward."""


def repair_uuid(s):
    # Historically the dashes in UUIDs are removed from AMQ entity names,
    # but there's no known reason to.  Hopefully we'll be able to fix
    # this in v4.0.
    return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
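
# For illustration, repair_uuid() reverses the dash-stripping done by
# AMQPBackend.rkey() below:
#
#     >>> repair_uuid('c3a067ea716c4895b20cdb6b8ea0ad9c')
#     'c3a067ea-716c-4895-b20c-db6b8ea0ad9c'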


class NoCacheQueue(Queue):
    can_cache_declaration = False


class AMQPBackend(BaseBackend):
    """The AMQP result backend.

    Deprecated: Please use the RPC backend or a persistent backend.
    """

    Exchange = Exchange
    Queue = NoCacheQueue
    Consumer = Consumer
    Producer = Producer

    BacklogLimitExceeded = BacklogLimitExceeded

    persistent = True
    supports_autoexpire = True
    supports_native_join = True

    retry_policy = {
        'max_retries': 20,
        'interval_start': 0,
        'interval_step': 1,
        'interval_max': 1,
    }
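
    # With this policy a failed publish is retried immediately, then once per
    # second (the interval is capped at 1s), up to 20 retries, i.e. roughly
    # 19 seconds of waiting in the worst case.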

    def __init__(self, app, connection=None, exchange=None, exchange_type=None,
                 persistent=None, serializer=None, auto_delete=True, **kwargs):
        deprecated.warn(
            'The AMQP result backend', deprecation='4.0', removal='5.0',
            alternative='Please use RPC backend or a persistent backend.')
        super(AMQPBackend, self).__init__(app, **kwargs)
        conf = self.app.conf
        self._connection = connection
        self.persistent = self.prepare_persistent(persistent)
        self.delivery_mode = 2 if self.persistent else 1
        exchange = exchange or conf.result_exchange
        exchange_type = exchange_type or conf.result_exchange_type
        self.exchange = self._create_exchange(
            exchange, exchange_type, self.delivery_mode,
        )
        self.serializer = serializer or conf.result_serializer
        self.auto_delete = auto_delete

    def _create_exchange(self, name, type='direct', delivery_mode=2):
        return self.Exchange(name=name,
                             type=type,
                             delivery_mode=delivery_mode,
                             durable=self.persistent,
                             auto_delete=False)

    def _create_binding(self, task_id):
        name = self.rkey(task_id)
        return self.Queue(
            name=name,
            exchange=self.exchange,
            routing_key=name,
            durable=self.persistent,
            auto_delete=self.auto_delete,
            expires=self.expires,
        )

    def revive(self, channel):
        pass

    def rkey(self, task_id):
        return task_id.replace('-', '')

    def destination_for(self, task_id, request):
        if request:
            return self.rkey(task_id), request.correlation_id or task_id
        return self.rkey(task_id), task_id
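
    # For illustration (hypothetical id): a task id of
    # 'c3a067ea-716c-4895-b20c-db6b8ea0ad9c' maps to the queue name and
    # routing key 'c3a067ea716c4895b20cdb6b8ea0ad9c'; repair_uuid() above
    # recovers the dashed form.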

    def store_result(self, task_id, result, state,
                     traceback=None, request=None, **kwargs):
        """Send task return value and state."""
        routing_key, correlation_id = self.destination_for(task_id, request)
        if not routing_key:
            return
        with self.app.amqp.producer_pool.acquire(block=True) as producer:
            producer.publish(
                {'task_id': task_id, 'status': state,
                 'result': self.encode_result(result, state),
                 'traceback': traceback,
                 'children': self.current_task_children(request)},
                exchange=self.exchange,
                routing_key=routing_key,
                correlation_id=correlation_id,
                serializer=self.serializer,
                retry=True, retry_policy=self.retry_policy,
                declare=self.on_reply_declare(task_id),
                delivery_mode=self.delivery_mode,
            )
        return result
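
    # The published message body has this shape (values are illustrative):
    #
    #     {'task_id': 'c3a067ea-716c-4895-b20c-db6b8ea0ad9c',
    #      'status': 'SUCCESS',
    #      'result': 42,
    #      'traceback': None,
    #      'children': []}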

    def on_reply_declare(self, task_id):
        return [self._create_binding(task_id)]

    def wait_for(self, task_id, timeout=None, cache=True,
                 no_ack=True, on_interval=None,
                 READY_STATES=states.READY_STATES,
                 PROPAGATE_STATES=states.PROPAGATE_STATES,
                 **kwargs):
        cached_meta = self._cache.get(task_id)
        if cache and cached_meta and \
                cached_meta['status'] in READY_STATES:
            return cached_meta
        else:
            try:
                return self.consume(task_id, timeout=timeout, no_ack=no_ack,
                                    on_interval=on_interval)
            except socket.timeout:
                raise TimeoutError('The operation timed out.')
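
    # A minimal call sketch (hypothetical backend/task id; results are
    # normally retrieved through AsyncResult.get() instead):
    #
    #     meta = backend.wait_for(task_id, timeout=5.0)
    #     meta['status']  # e.g. 'SUCCESS'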

    def get_task_meta(self, task_id, backlog_limit=1000):
        # Polling and using basic_get
        with self.app.pool.acquire_channel(block=True) as (_, channel):
            binding = self._create_binding(task_id)(channel)
            binding.declare()

            prev = latest = acc = None
            for i in range(backlog_limit):  # spool ffwd
                acc = binding.get(
                    accept=self.accept, no_ack=False,
                )
                if not acc:  # no more messages
                    break
                if acc.payload['task_id'] == task_id:
                    prev, latest = latest, acc
                if prev:
                    # backends are not expected to keep history,
                    # so we delete everything except the most recent state.
                    prev.ack()
                    prev = None
            else:
                raise self.BacklogLimitExceeded(task_id)

            if latest:
                payload = self._cache[task_id] = self.meta_from_decoded(
                    latest.payload)
                latest.requeue()
                return payload
            else:
                # no new state, use previous
                try:
                    return self._cache[task_id]
                except KeyError:
                    # result probably pending.
                    return {'status': states.PENDING, 'result': None}

    poll = get_task_meta  # XXX compat
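
    # Note that get_task_meta() acks (deletes) every queued state message
    # except the newest, which is requeued so repeated polls see it again:
    #
    #     backend.poll(task_id)  # e.g. {'status': 'STARTED', ...}
    #     backend.poll(task_id)  # the requeued message is returned again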

    def drain_events(self, connection, consumer,
                     timeout=None, on_interval=None, now=monotonic, wait=None):
        wait = wait or connection.drain_events
        results = {}

        def callback(meta, message):
            if meta['status'] in states.READY_STATES:
                results[meta['task_id']] = self.meta_from_decoded(meta)

        consumer.callbacks[:] = [callback]
        time_start = now()

        while 1:
            # Total time spent may exceed a single call to wait()
            if timeout and now() - time_start >= timeout:
                raise socket.timeout()
            try:
                wait(timeout=1)
            except socket.timeout:
                pass
            if on_interval:
                on_interval()
            if results:  # got event on the wanted channel.
                break
        self._cache.update(results)
        return results

    def consume(self, task_id, timeout=None, no_ack=True, on_interval=None):
        wait = self.drain_events
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            binding = self._create_binding(task_id)
            with self.Consumer(channel, binding,
                               no_ack=no_ack, accept=self.accept) as consumer:
                while 1:
                    try:
                        return wait(
                            conn, consumer, timeout, on_interval)[task_id]
                    except KeyError:
                        continue

    def _many_bindings(self, ids):
        return [self._create_binding(task_id) for task_id in ids]

    def get_many(self, task_ids, timeout=None, no_ack=True,
                 on_message=None, on_interval=None,
                 now=monotonic, getfields=itemgetter('status', 'task_id'),
                 READY_STATES=states.READY_STATES,
                 PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs):
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            ids = set(task_ids)
            cached_ids = set()
            mark_cached = cached_ids.add
            for task_id in ids:
                try:
                    cached = self._cache[task_id]
                except KeyError:
                    pass
                else:
                    if cached['status'] in READY_STATES:
                        yield task_id, cached
                        mark_cached(task_id)
            ids.difference_update(cached_ids)
            results = deque()
            push_result = results.append
            push_cache = self._cache.__setitem__
            decode_result = self.meta_from_decoded

            def _on_message(message):
                body = decode_result(message.decode())
                if on_message is not None:
                    on_message(body)
                state, uid = getfields(body)
                if state in READY_STATES:
                    push_result(body) \
                        if uid in task_ids else push_cache(uid, body)

            bindings = self._many_bindings(task_ids)
            with self.Consumer(channel, bindings, on_message=_on_message,
                               accept=self.accept, no_ack=no_ack):
                wait = conn.drain_events
                popleft = results.popleft
                while ids:
                    wait(timeout=timeout)
                    while results:
                        state = popleft()
                        task_id = state['task_id']
                        ids.discard(task_id)
                        push_cache(task_id, state)
                        yield task_id, state
                    if on_interval:
                        on_interval()
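
    # Usage sketch (hypothetical ids): get_many() is a generator that yields
    # (task_id, meta) pairs as results arrive:
    #
    #     for task_id, meta in backend.get_many({'id1', 'id2'}, timeout=10):
    #         print(task_id, meta['status'])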

    def reload_task_result(self, task_id):
        raise NotImplementedError(
            'reload_task_result is not supported by this backend.')

    def reload_group_result(self, group_id):
        """Reload group result, even if it has been previously fetched."""
        raise NotImplementedError(
            'reload_group_result is not supported by this backend.')

    def save_group(self, group_id, result):
        raise NotImplementedError(
            'save_group is not supported by this backend.')

    def restore_group(self, group_id, cache=True):
        raise NotImplementedError(
            'restore_group is not supported by this backend.')

    def delete_group(self, group_id):
        raise NotImplementedError(
            'delete_group is not supported by this backend.')

    def __reduce__(self, args=(), kwargs={}):
        # Copy instead of calling kwargs.update() in place, which would
        # mutate the shared mutable default argument.
        kwargs = dict(kwargs,
                      connection=self._connection,
                      exchange=self.exchange.name,
                      exchange_type=self.exchange.type,
                      persistent=self.persistent,
                      serializer=self.serializer,
                      auto_delete=self.auto_delete,
                      expires=self.expires)
        return super(AMQPBackend, self).__reduce__(args, kwargs)
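
    # Pickle round-trip sketch (assumes an active Celery app is available to
    # reconstruct the backend):
    #
    #     import pickle
    #     backend2 = pickle.loads(pickle.dumps(backend))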

    def as_uri(self, include_password=True):
        return 'amqp://'