# -*- coding: utf-8 -*-
"""
celery.backends.amqp
~~~~~~~~~~~~~~~~~~~~

The AMQP result backend.

This backend publishes results as messages.

"""
from __future__ import absolute_import

import socket
import time

from collections import deque
from operator import itemgetter

from kombu import Exchange, Queue, Producer, Consumer

from celery import states
from celery.exceptions import TimeoutError
from celery.five import range
from celery.utils.functional import dictfilter
from celery.utils.log import get_logger
from celery.utils.timeutils import maybe_s_to_ms

from .base import BaseBackend

__all__ = ['BacklogLimitExceeded', 'AMQPBackend']

logger = get_logger(__name__)


class BacklogLimitExceeded(Exception):
    """Too much state history to fast-forward."""


def repair_uuid(s):
    # Historically the dashes in UUIDs have been removed from AMQ entity
    # names, but there is no known reason to do so.  Hopefully we'll be able
    # to fix this in v4.0.
    return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
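# Illustration (added; not in the original module): repair_uuid() re-inserts
# the dashes stripped from a task id when it was used as a queue/routing-key
# name, e.g.::
#
#     >>> repair_uuid('d9078da5991540a0bfa1392c7bde42ed')
#     'd9078da5-9915-40a0-bfa1-392c7bde42ed'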


class AMQPBackend(BaseBackend):
    """Publishes results by sending messages."""

    Exchange = Exchange
    Queue = Queue
    Consumer = Consumer
    Producer = Producer

    BacklogLimitExceeded = BacklogLimitExceeded

    supports_autoexpire = True
    supports_native_join = True

    retry_policy = {
        'max_retries': 20,
        'interval_start': 0,
        'interval_step': 1,
        'interval_max': 1,
    }
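    # Note (added; not in the original module): this policy is handed to
    # kombu's ``Producer.publish(retry=True, retry_policy=...)`` in
    # _store_result below, so publishing a result over a dropped broker
    # connection is retried up to 20 times, starting immediately and backing
    # off to at most one second between attempts.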

    def __init__(self, app, connection=None, exchange=None, exchange_type=None,
                 persistent=None, serializer=None, auto_delete=True, **kwargs):
        super(AMQPBackend, self).__init__(app, **kwargs)
        conf = self.app.conf
        self._connection = connection
        self.persistent = (conf.CELERY_RESULT_PERSISTENT if persistent is None
                           else persistent)
        exchange = exchange or conf.CELERY_RESULT_EXCHANGE
        exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
        self.exchange = self._create_exchange(exchange, exchange_type,
                                              self.persistent)
        self.serializer = serializer or conf.CELERY_RESULT_SERIALIZER
        self.auto_delete = auto_delete

        self.expires = None
        # Omitting ``expires`` falls back to the configured default expiry,
        # while an explicit ``expires=None`` disables expiry altogether.
        if 'expires' not in kwargs or kwargs['expires'] is not None:
            self.expires = self.prepare_expires(kwargs.get('expires'))
        self.queue_arguments = dictfilter({
            'x-expires': maybe_s_to_ms(self.expires),
        })

    def _create_exchange(self, name, type='direct', persistent=True):
        delivery_mode = persistent and 'persistent' or 'transient'
        return self.Exchange(name=name,
                             type=type,
                             delivery_mode=delivery_mode,
                             durable=self.persistent,
                             auto_delete=False)

    def _create_binding(self, task_id):
        name = task_id.replace('-', '')
        return self.Queue(name=name,
                          exchange=self.exchange,
                          routing_key=name,
                          durable=self.persistent,
                          auto_delete=self.auto_delete,
                          queue_arguments=self.queue_arguments)

    def revive(self, channel):
        pass

    def _routing_key(self, task_id):
        return task_id.replace('-', '')

    def _store_result(self, task_id, result, status, traceback=None):
        """Send task return value and status."""
        with self.app.amqp.producer_pool.acquire(block=True) as pub:
            pub.publish({'task_id': task_id, 'status': status,
                         'result': self.encode_result(result, status),
                         'traceback': traceback,
                         'children': self.current_task_children()},
                        exchange=self.exchange,
                        routing_key=self._routing_key(task_id),
                        serializer=self.serializer,
                        retry=True, retry_policy=self.retry_policy,
                        declare=self.on_reply_declare(task_id))
        return result
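
    # Payload sketch (added; not in the original module): the message
    # published above is what get_task_meta()/consume() later decode,
    # roughly::
    #
    #     {'task_id': 'd9078da5-9915-40a0-bfa1-392c7bde42ed',
    #      'status': 'SUCCESS',
    #      'result': 42,
    #      'traceback': None,
    #      'children': []}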

    def on_reply_declare(self, task_id):
        return [self._create_binding(task_id)]

    def wait_for(self, task_id, timeout=None, cache=True, propagate=True,
                 READY_STATES=states.READY_STATES,
                 PROPAGATE_STATES=states.PROPAGATE_STATES,
                 **kwargs):
        cached_meta = self._cache.get(task_id)
        if cache and cached_meta and \
                cached_meta['status'] in READY_STATES:
            meta = cached_meta
        else:
            try:
                meta = self.consume(task_id, timeout=timeout)
            except socket.timeout:
                raise TimeoutError('The operation timed out.')

        if meta['status'] in PROPAGATE_STATES and propagate:
            raise self.exception_to_python(meta['result'])
        # consume() always returns READY_STATE.
        return meta['result']
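
    # Usage sketch (added; not in the original module): callers rarely invoke
    # wait_for() directly; with this backend configured it is what ends up
    # servicing ``AsyncResult(task_id).get(timeout=...)``, blocking on the
    # task's reply queue until a ready state arrives or the timeout expires.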

    def get_task_meta(self, task_id, backlog_limit=1000):
        # Polling and using basic_get
        with self.app.pool.acquire_channel(block=True) as (_, channel):
            binding = self._create_binding(task_id)(channel)
            binding.declare()

            prev = latest = acc = None
            for i in range(backlog_limit):  # spool ffwd
                prev, latest, acc = latest, acc, binding.get(
                    accept=self.accept, no_ack=False,
                )
                if not acc:  # no more messages
                    break
                if prev:
                    # backends are not expected to keep history,
                    # so we delete everything except the most recent state.
                    prev.ack()
            else:
                raise self.BacklogLimitExceeded(task_id)

            if latest:
                payload = self._cache[task_id] = latest.payload
                latest.requeue()
                return payload
            else:
                # no new state, use previous
                try:
                    return self._cache[task_id]
                except KeyError:
                    # result probably pending.
                    return {'status': states.PENDING, 'result': None}
    poll = get_task_meta  # XXX compat
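
    # Polling sketch (added; not in the original module): a state poll with no
    # message waiting simply reports PENDING, e.g.::
    #
    #     >>> backend.get_task_meta('d9078da5-9915-40a0-bfa1-392c7bde42ed')
    #     {'status': 'PENDING', 'result': None}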

    def drain_events(self, connection, consumer,
                     timeout=None, now=time.time, wait=None):
        wait = wait or connection.drain_events
        results = {}

        def callback(meta, message):
            if meta['status'] in states.READY_STATES:
                results[meta['task_id']] = meta
        consumer.callbacks[:] = [callback]
        time_start = now()

        while 1:
            # Total time spent may exceed a single call to wait()
            if timeout and now() - time_start >= timeout:
                raise socket.timeout()
            wait(timeout=timeout)
            if results:  # got event on the wanted channel.
                break
        self._cache.update(results)
        return results

    def consume(self, task_id, timeout=None):
        wait = self.drain_events
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            binding = self._create_binding(task_id)
            with self.Consumer(channel, binding,
                               no_ack=True, accept=self.accept) as consumer:
                while 1:
                    try:
                        return wait(conn, consumer, timeout)[task_id]
                    except KeyError:
                        continue

    def _many_bindings(self, ids):
        return [self._create_binding(task_id) for task_id in ids]

    def get_many(self, task_ids, timeout=None,
                 now=time.time, getfields=itemgetter('status', 'task_id'),
                 READY_STATES=states.READY_STATES, **kwargs):
        with self.app.pool.acquire_channel(block=True) as (conn, channel):
            ids = set(task_ids)
            cached_ids = set()
            mark_cached = cached_ids.add
            for task_id in ids:
                try:
                    cached = self._cache[task_id]
                except KeyError:
                    pass
                else:
                    if cached['status'] in READY_STATES:
                        yield task_id, cached
                        mark_cached(task_id)
            ids.difference_update(cached_ids)
            results = deque()
            push_result = results.append
            push_cache = self._cache.__setitem__

            def on_message(message):
                body = message.decode()
                state, uid = getfields(body)
                if state in READY_STATES:
                    push_result(body) \
                        if uid in task_ids else push_cache(uid, body)

            bindings = self._many_bindings(task_ids)
            with self.Consumer(channel, bindings, on_message=on_message,
                               accept=self.accept, no_ack=True):
                wait = conn.drain_events
                popleft = results.popleft
                while ids:
                    wait(timeout=timeout)
                    while results:
                        state = popleft()
                        task_id = state['task_id']
                        ids.discard(task_id)
                        push_cache(task_id, state)
                        yield task_id, state
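
    # Usage sketch (added; not in the original module): get_many() is the
    # generator behind native joins on groups of results, roughly::
    #
    #     for task_id, meta in backend.get_many(task_ids, timeout=10):
    #         print(task_id, meta['status'], meta['result'])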

    def reload_task_result(self, task_id):
        raise NotImplementedError(
            'reload_task_result is not supported by this backend.')

    def reload_group_result(self, task_id):
        """Reload group result, even if it has been previously fetched."""
        raise NotImplementedError(
            'reload_group_result is not supported by this backend.')

    def save_group(self, group_id, result):
        raise NotImplementedError(
            'save_group is not supported by this backend.')

    def restore_group(self, group_id, cache=True):
        raise NotImplementedError(
            'restore_group is not supported by this backend.')

    def delete_group(self, group_id):
        raise NotImplementedError(
            'delete_group is not supported by this backend.')

    def __reduce__(self, args=(), kwargs={}):
        kwargs.update(
            connection=self._connection,
            exchange=self.exchange.name,
            exchange_type=self.exchange.type,
            persistent=self.persistent,
            serializer=self.serializer,
            auto_delete=self.auto_delete,
            expires=self.expires,
        )
        return super(AMQPBackend, self).__reduce__(args, kwargs)