@@ -13,27 +13,23 @@
 """
 from __future__ import absolute_import

-import socket
 import sys
 import time

-from collections import deque
 from datetime import timedelta
-from weakref import WeakKeyDictionary

 from billiard.einfo import ExceptionInfo
 from kombu.serialization import (
     dumps, loads, prepare_accept_content,
     registry as serializer_registry,
 )
-from kombu.syn import detect_environment
 from kombu.utils.encoding import bytes_to_str, ensure_bytes, from_utf8

 from celery import states
 from celery import current_app, group, maybe_signature
 from celery.app import current_task
 from celery.exceptions import ChordError, TimeoutError, TaskRevokedError
-from celery.five import items, monotonic
+from celery.five import items
 from celery.result import (
     GroupResult, ResultBase, allow_join_result, result_from_tuple,
 )
@@ -432,185 +428,11 @@ class SyncBackendMixin(object):
         return result


-class AsyncBackendMixin(object):
-
-    def _collect_into(self, result, bucket):
-        self.result_consumer.buckets[result] = bucket
-
-    def iter_native(self, result, timeout=None, interval=0.5, no_ack=True,
-                    on_message=None, on_interval=None):
-        results = result.results
-        if not results:
-            raise StopIteration()
-
-        bucket = deque()
-        for result in results:
-            self._collect_into(result, bucket)
-
-        for _ in self._wait_for_pending(
-                result,
-                timeout=timeout, interval=interval, no_ack=no_ack,
-                on_message=on_message, on_interval=on_interval):
-            while bucket:
-                result = bucket.popleft()
-                yield result.id, result._cache
-        while bucket:
-            result = bucket.popleft()
-            yield result.id, result._cache
-
-    def add_pending_result(self, result):
-        if result.id not in self._pending_results:
-            self._pending_results[result.id] = result
-            self.result_consumer.consume_from(self._create_binding(result.id))
-        return result
-
-    def remove_pending_result(self, result):
-        self._pending_results.pop(result.id, None)
-        self.on_result_fulfilled(result)
-        return result
-
-    def on_result_fulfilled(self, result):
-        pass
-
-    def wait_for_pending(self, result,
-                         callback=None, propagate=True, **kwargs):
-        for _ in self._wait_for_pending(result, **kwargs):
-            pass
-        return result.maybe_throw(callback=callback, propagate=propagate)
-
-    def _wait_for_pending(self, result, timeout=None, interval=0.5,
-                          no_ack=True, on_interval=None, on_message=None,
-                          callback=None, propagate=True):
-        return self.result_consumer._wait_for_pending(
-            result, timeout=timeout, interval=interval,
-            no_ack=no_ack, on_interval=on_interval,
-            callback=callback, on_message=on_message, propagate=propagate,
-        )
-
-
 class BaseBackend(Backend, SyncBackendMixin):
     pass
 BaseDictBackend = BaseBackend  # XXX compat


-
-class Drainer(object):
-
-    def __init__(self, result_consumer):
-        self.result_consumer = result_consumer
-
-    def drain_events_until(self, p, timeout=None, on_interval=None,
-                           monotonic=monotonic, wait=None):
-        wait = wait or self.result_consumer.drain_events
-        time_start = monotonic()
-
-        while 1:
-            # Total time spent may exceed a single call to wait()
-            if timeout and monotonic() - time_start >= timeout:
-                raise socket.timeout()
-            try:
-                yield self.wait_for(p, wait, timeout=1)
-            except socket.timeout:
-                pass
-            if on_interval:
-                on_interval()
-            if p.ready:  # got event on the wanted channel.
-                break
-
-    def wait_for(self, p, wait, timeout=None):
-        wait(timeout=timeout)
-
-
-class EventletDrainer(Drainer):
-    _g = None
-    _stopped = False
-
-    def run(self):
-        while not self._stopped:
-            try:
-                print("DRAINING!!!!!!!!!!!!!!!!")
-                self.result_consumer.drain_events(timeout=10)
-            except socket.timeout:
-                pass
-
-    def start(self):
-        from eventlet import spawn
-        if self._g is None:
-            self._g = spawn(self.run)
-
-    def stop(self):
-        self._stopped = True
-
-    def wait_for(self, p, wait, timeout=None):
-        if self._g is None:
-            self.start()
-        if not p.ready:
-            time.sleep(0)
-
-
-drainers = {'default': Drainer, 'eventlet': EventletDrainer}
-
-class BaseResultConsumer(object):
-
-    def __init__(self, backend, app, accept, pending_results):
-        self.backend = backend
-        self.app = app
-        self.accept = accept
-        self._pending_results = pending_results
-        self.on_message = None
-        self.buckets = WeakKeyDictionary()
-        self.drainer = drainers[detect_environment()](self)
-
-    def drain_events(self, timeout=None):
-        raise NotImplementedError('subclass responsibility')
-
-    def _after_fork(self):
-        self.bucket.clear()
-        self.buckets = WeakKeyDictionary()
-        self.on_message = None
-        self.on_after_fork()
-
-    def on_after_fork(self):
-        pass
-
-    def drain_events_until(self, p, timeout=None, on_interval=None):
-        return self.drainer.drain_events_until(
-            p, timeout=timeout, on_interval=on_interval)
-
-    def _wait_for_pending(self, result, timeout=None, interval=0.5,
-                          no_ack=True, on_interval=None, callback=None,
-                          on_message=None, propagate=True):
-        prev_on_m, self.on_message = self.on_message, on_message
-        try:
-            for _ in self.drain_events_until(
-                    result.on_ready, timeout=timeout,
-                    on_interval=on_interval):
-                yield
-                time.sleep(0)
-        except socket.timeout:
-            raise TimeoutError('The operation timed out.')
-        finally:
-            self.on_message = prev_on_m
-
-    def on_state_change(self, meta, message):
-        if self.on_message:
-            self.on_message(meta)
-        if meta['status'] in states.READY_STATES:
-            try:
-                result = self._pending_results[meta['task_id']]
-            except KeyError:
-                return
-            result._maybe_set_cache(meta)
-            buckets = self.buckets
-            try:
-                buckets[result].append(result)
-                buckets.pop(result)
-            except KeyError:
-                pass
-        time.sleep(0)
-
-
-
 class KeyValueStoreBackend(BaseBackend):
     key_t = ensure_bytes
     task_keyprefix = 'celery-task-meta-'
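Note (not part of the patch): the removed Drainer.drain_events_until above bounds the
*total* time spent waiting rather than trusting any single wait() call to time out; it
blocks in short slices, re-checks an overall deadline each pass, and stops only once the
awaited event is ready. A minimal standalone sketch of that pattern, assuming Python 3's
time.monotonic() in place of celery.five.monotonic (all names here are illustrative, not
APIs from the patch):

    import socket
    import time

    def drain_until(ready, wait, timeout=None, on_interval=None):
        # Call ``wait`` in short slices until ``ready()`` is true.
        # ``wait`` is expected to raise socket.timeout when its own short
        # timeout expires; the overall deadline is re-checked on every pass,
        # so total waiting never exceeds ``timeout``.
        start = time.monotonic()
        while True:
            if timeout and time.monotonic() - start >= timeout:
                raise socket.timeout()
            try:
                wait(timeout=1)      # one short slice of blocking work
            except socket.timeout:
                pass                 # slice expired; re-check the deadline
            if on_interval:
                on_interval()
            if ready():              # the awaited event has arrived
                return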