# -*- coding: utf-8 -*-
"""
celery.events.state
~~~~~~~~~~~~~~~~~~~

This module implements a data-structure used to keep
track of the state of a cluster of workers and the tasks
they are working on (by consuming events).

For every event consumed the state is updated,
so the state represents the state of the cluster
at the time of the last event.

Snapshots (:mod:`celery.events.snapshot`) can be used to
take "pictures" of this state at regular intervals
to e.g. store it in a database.
"""

from __future__ import absolute_import, unicode_literals

import bisect
import sys
import threading

from datetime import datetime
from decimal import Decimal
from itertools import islice
from operator import itemgetter
from time import time
from weakref import WeakSet, ref

from kombu.clocks import timetuple
from kombu.utils import cached_property

from celery import states
from celery.five import items, python_2_unicode_compatible, values
from celery.utils.functional import LRUCache, memoize
from celery.utils.log import get_logger

__all__ = ['Worker', 'Task', 'State', 'heartbeat_expires']

PYPY = hasattr(sys, 'pypy_version_info')

# The window (in percentage) is added to the worker's heartbeat
# frequency.  If the time between updates exceeds this window,
# then the worker is considered to be offline.
HEARTBEAT_EXPIRE_WINDOW = 200

# Max drift between event timestamp and time of event received
# before we alert that clocks may be unsynchronized.
HEARTBEAT_DRIFT_MAX = 16

DRIFT_WARNING = """\
Substantial drift from %s may mean clocks are out of sync. Current drift is
%s seconds. [orig: %s recv: %s]
"""

logger = get_logger(__name__)
warn = logger.warning

R_STATE = '<State: events={0.event_count} tasks={0.task_count}>'
R_WORKER = '<Worker: {0.hostname} ({0.status_string} clock:{0.clock})>'
R_TASK = '<Task: {0.name}({0.uuid}) {0.state} clock:{0.clock}>'

#: Mapping of task event names to task state.
TASK_EVENT_TO_STATE = {
    'sent': states.PENDING,
    'received': states.RECEIVED,
    'started': states.STARTED,
    'failed': states.FAILURE,
    'retried': states.RETRY,
    'succeeded': states.SUCCESS,
    'revoked': states.REVOKED,
    'rejected': states.REJECTED,
}


@memoize(maxsize=1000, keyfun=lambda a, _: a[0])
def _warn_drift(hostname, drift, local_received, timestamp):
    # we use memoize here so the warning is only logged once per hostname
    warn(DRIFT_WARNING, hostname, drift,
         datetime.fromtimestamp(local_received),
         datetime.fromtimestamp(timestamp))


def heartbeat_expires(timestamp, freq=60,
                      expire_window=HEARTBEAT_EXPIRE_WINDOW,
                      Decimal=Decimal, float=float, isinstance=isinstance):
    # some json implementations return decimal.Decimal objects,
    # which are not compatible with float.
    freq = float(freq) if isinstance(freq, Decimal) else freq
    if isinstance(timestamp, Decimal):
        timestamp = float(timestamp)
    return timestamp + (freq * (expire_window / 1e2))
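
# Worked example: with the default expire_window of 200%, a worker
# heartbeating every 60 seconds is considered offline 120 seconds after
# its last heartbeat:
#
#     heartbeat_expires(1000.0, freq=60)  # 1000.0 + 60 * (200 / 1e2) == 1120.0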


def _depickle_task(cls, fields):
    return cls(**fields)


def with_unique_field(attr):

    def _decorate_cls(cls):

        def __eq__(this, other):
            if isinstance(other, this.__class__):
                return getattr(this, attr) == getattr(other, attr)
            return NotImplemented
        cls.__eq__ = __eq__

        def __ne__(this, other):
            res = this.__eq__(other)
            return True if res is NotImplemented else not res
        cls.__ne__ = __ne__

        def __hash__(this):
            return hash(getattr(this, attr))
        cls.__hash__ = __hash__

        return cls
    return _decorate_cls
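
# Example (a minimal sketch of the decorator's effect): instances of a class
# decorated with ``with_unique_field('hostname')``, like ``Worker`` below,
# compare equal and hash alike whenever their ``hostname`` attributes match:
#
#     w1 = Worker(hostname='celery@alpha')
#     w2 = Worker(hostname='celery@alpha')
#     assert w1 == w2 and hash(w1) == hash(w2)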


@with_unique_field('hostname')
@python_2_unicode_compatible
class Worker(object):
    """Worker State."""

    heartbeat_max = 4
    expire_window = HEARTBEAT_EXPIRE_WINDOW

    _fields = ('hostname', 'pid', 'freq', 'heartbeats', 'clock',
               'active', 'processed', 'loadavg', 'sw_ident',
               'sw_ver', 'sw_sys')
    if not PYPY:  # pragma: no cover
        __slots__ = _fields + ('event', '__dict__', '__weakref__')

    def __init__(self, hostname=None, pid=None, freq=60,
                 heartbeats=None, clock=0, active=None, processed=None,
                 loadavg=None, sw_ident=None, sw_ver=None, sw_sys=None):
        self.hostname = hostname
        self.pid = pid
        self.freq = freq
        self.heartbeats = [] if heartbeats is None else heartbeats
        self.clock = clock or 0
        self.active = active
        self.processed = processed
        self.loadavg = loadavg
        self.sw_ident = sw_ident
        self.sw_ver = sw_ver
        self.sw_sys = sw_sys
        self.event = self._create_event_handler()

    def __reduce__(self):
        return self.__class__, (self.hostname, self.pid, self.freq,
                                self.heartbeats, self.clock, self.active,
                                self.processed, self.loadavg, self.sw_ident,
                                self.sw_ver, self.sw_sys)

    def _create_event_handler(self):
        _set = object.__setattr__
        hbmax = self.heartbeat_max
        heartbeats = self.heartbeats
        hb_pop = self.heartbeats.pop
        hb_append = self.heartbeats.append

        def event(type_, timestamp=None,
                  local_received=None, fields=None,
                  max_drift=HEARTBEAT_DRIFT_MAX, items=items, abs=abs, int=int,
                  insort=bisect.insort, len=len):
            fields = fields or {}
            for k, v in items(fields):
                _set(self, k, v)
            if type_ == 'offline':
                heartbeats[:] = []
            else:
                if not local_received or not timestamp:
                    return
                drift = abs(int(local_received) - int(timestamp))
                if drift > max_drift:
                    _warn_drift(self.hostname, drift,
                                local_received, timestamp)
                if local_received:  # pragma: no cover
                    hearts = len(heartbeats)
                    if hearts > hbmax - 1:
                        hb_pop(0)
                    if hearts and local_received > heartbeats[-1]:
                        hb_append(local_received)
                    else:
                        insort(heartbeats, local_received)
        return event

    def update(self, f, **kw):
        for k, v in items(dict(f, **kw) if kw else f):
            setattr(self, k, v)

    def __repr__(self):
        return R_WORKER.format(self)

    @property
    def status_string(self):
        return 'ONLINE' if self.alive else 'OFFLINE'

    @property
    def heartbeat_expires(self):
        return heartbeat_expires(self.heartbeats[-1],
                                 self.freq, self.expire_window)

    @property
    def alive(self, nowfun=time):
        return bool(self.heartbeats and nowfun() < self.heartbeat_expires)

    @property
    def id(self):
        return '{0.hostname}.{0.pid}'.format(self)
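
# Example (an illustrative sketch): feeding a heartbeat event through
# ``Worker.event`` records the reception time, after which ``alive`` stays
# True until ``heartbeat_expires`` passes:
#
#     w = Worker(hostname='celery@alpha', freq=60)
#     now = time()
#     w.event('heartbeat', timestamp=now, local_received=now)
#     assert w.alive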


@with_unique_field('uuid')
@python_2_unicode_compatible
class Task(object):
    """Task State."""

    name = received = sent = started = succeeded = failed = retried = \
        revoked = rejected = args = kwargs = eta = expires = retries = \
        worker = result = exception = timestamp = runtime = traceback = \
        exchange = routing_key = root_id = parent_id = client = None
    state = states.PENDING
    clock = 0

    _fields = (
        'uuid', 'name', 'state', 'received', 'sent', 'started', 'rejected',
        'succeeded', 'failed', 'retried', 'revoked', 'args', 'kwargs',
        'eta', 'expires', 'retries', 'worker', 'result', 'exception',
        'timestamp', 'runtime', 'traceback', 'exchange', 'routing_key',
        'clock', 'client', 'root', 'root_id', 'parent', 'parent_id',
        'children',
    )
    if not PYPY:  # pragma: no cover
        __slots__ = ('__dict__', '__weakref__')

    #: How to merge out of order events.
    #: Disorder is detected by logical ordering (e.g. :event:`task-received`
    #: must have happened before a :event:`task-failed` event).
    #:
    #: A merge rule consists of a state and a list of fields to keep from
    #: that state.  ``(RECEIVED, ('name', 'args'))`` means the name and args
    #: fields are always taken from the RECEIVED state, and any values for
    #: these fields received before or after are simply ignored.
    merge_rules = {
        states.RECEIVED: (
            'name', 'args', 'kwargs', 'parent_id',
            'root_id', 'retries', 'eta', 'expires',
        ),
    }
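
    # Illustrative sketch of the rule above: if a task-failed event arrives
    # before the (logically earlier) task-received event, the late RECEIVED
    # event only contributes its whitelisted fields and cannot regress the
    # newer FAILURE state:
    #
    #     t = Task('id1')
    #     t.event('failed', timestamp=10, local_received=10)
    #     t.event('received', timestamp=5, local_received=5,
    #             fields={'name': 'tasks.add'})
    #     assert t.state == states.FAILURE and t.name == 'tasks.add'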

    #: :meth:`info` displays these fields by default.
    _info_fields = (
        'args', 'kwargs', 'retries', 'result', 'eta', 'runtime',
        'expires', 'exception', 'exchange', 'routing_key',
        'root_id', 'parent_id',
    )

    def __init__(self, uuid=None, cluster_state=None, **kwargs):
        self.uuid = uuid
        self.children = WeakSet()
        self.cluster_state = cluster_state
        if kwargs:
            self.__dict__.update(kwargs)

    def event(self, type_, timestamp=None, local_received=None, fields=None,
              precedence=states.precedence, items=items,
              setattr=setattr, task_event_to_state=TASK_EVENT_TO_STATE.get,
              RETRY=states.RETRY):
        fields = fields or {}

        # using .get is faster than catching KeyError in this case.
        state = task_event_to_state(type_)
        if state is not None:
            # sets e.g. self.succeeded to the timestamp.
            setattr(self, type_, timestamp)
        else:
            state = type_.upper()  # custom state

        # note that precedence here is reversed
        # see implementation in celery.states.state.__lt__
        if state != RETRY and self.state != RETRY and \
                precedence(state) > precedence(self.state):
            # this state logically happens-before the current state, so merge.
            keep = self.merge_rules.get(state)
            if keep is not None:
                fields = {
                    k: v for k, v in items(fields) if k in keep
                }
        else:
            fields.update(state=state, timestamp=timestamp)

        # update current state with info from this event.
        self.__dict__.update(fields)

    def info(self, fields=None, extra=[]):
        """Information about this task suitable for on-screen display."""
        fields = self._info_fields if fields is None else fields

        def _keys():
            for key in list(fields) + list(extra):
                value = getattr(self, key, None)
                if value is not None:
                    yield key, value

        return dict(_keys())

    def __repr__(self):
        return R_TASK.format(self)

    def as_dict(self):
        get = object.__getattribute__
        return {
            k: get(self, k) for k in self._fields
        }

    def __reduce__(self):
        return _depickle_task, (self.__class__, self.as_dict())

    @property
    def id(self):
        return self.uuid

    @property
    def origin(self):
        return self.client if self.worker is None else self.worker.id

    @property
    def ready(self):
        return self.state in states.READY_STATES

    @cached_property
    def parent(self):
        return self.parent_id and self.cluster_state.tasks[self.parent_id]

    @cached_property
    def root(self):
        return self.root_id and self.cluster_state.tasks[self.root_id]
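
# Example (illustrative sketch): replaying events through ``Task.event``
# advances the task through its states:
#
#     t = Task('id1')  # any task id string
#     t.event('received', timestamp=time(), local_received=time(),
#             fields={'name': 'tasks.add', 'args': '(2, 2)'})
#     assert t.state == states.RECEIVED and not t.ready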


class State(object):
    """Records cluster state."""

    Worker = Worker
    Task = Task
    event_count = 0
    task_count = 0
    heap_multiplier = 4

    def __init__(self, callback=None,
                 workers=None, tasks=None, taskheap=None,
                 max_workers_in_memory=5000, max_tasks_in_memory=10000,
                 on_node_join=None, on_node_leave=None):
        self.event_callback = callback
        self.workers = (LRUCache(max_workers_in_memory)
                        if workers is None else workers)
        self.tasks = (LRUCache(max_tasks_in_memory)
                      if tasks is None else tasks)
        self._taskheap = [] if taskheap is None else taskheap
        self.max_workers_in_memory = max_workers_in_memory
        self.max_tasks_in_memory = max_tasks_in_memory
        self.on_node_join = on_node_join
        self.on_node_leave = on_node_leave
        self._mutex = threading.Lock()
        self.handlers = {}
        self._seen_types = set()
        self._tasks_to_resolve = {}
        self.rebuild_taskheap()

    @cached_property
    def _event(self):
        return self._create_dispatcher()

    def freeze_while(self, fun, *args, **kwargs):
        clear_after = kwargs.pop('clear_after', False)
        with self._mutex:
            try:
                return fun(*args, **kwargs)
            finally:
                if clear_after:
                    self._clear()

    def clear_tasks(self, ready=True):
        with self._mutex:
            return self._clear_tasks(ready)

    def _clear_tasks(self, ready=True):
        if ready:
            in_progress = {
                uuid: task for uuid, task in self.itertasks()
                if task.state not in states.READY_STATES
            }
            self.tasks.clear()
            self.tasks.update(in_progress)
        else:
            self.tasks.clear()
        self._taskheap[:] = []

    def _clear(self, ready=True):
        self.workers.clear()
        self._clear_tasks(ready)
        self.event_count = 0
        self.task_count = 0

    def clear(self, ready=True):
        with self._mutex:
            return self._clear(ready)

    def get_or_create_worker(self, hostname, **kwargs):
        """Get or create worker by hostname.

        Return tuple of ``(worker, was_created)``.
        """
        try:
            worker = self.workers[hostname]
            if kwargs:
                worker.update(kwargs)
            return worker, False
        except KeyError:
            worker = self.workers[hostname] = self.Worker(
                hostname, **kwargs)
            return worker, True

    def get_or_create_task(self, uuid):
        """Get or create task by uuid."""
        try:
            return self.tasks[uuid], False
        except KeyError:
            task = self.tasks[uuid] = self.Task(uuid, cluster_state=self)
            return task, True
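
    # Example (illustrative): both helpers are idempotent per key, so a
    # second lookup reuses the cached object instead of creating a new one:
    #
    #     state = State()
    #     worker, created = state.get_or_create_worker('celery@alpha')
    #     assert created
    #     again, created = state.get_or_create_worker('celery@alpha')
    #     assert again is worker and not created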

    def event(self, event):
        with self._mutex:
            return self._event(event)

    def task_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type='-'.join(['task', type_])))[0]

    def worker_event(self, type_, fields):
        """Deprecated, use :meth:`event`."""
        return self._event(dict(fields, type='-'.join(['worker', type_])))[0]

    def _create_dispatcher(self):
        get_handler = self.handlers.__getitem__
        event_callback = self.event_callback
        wfields = itemgetter('hostname', 'timestamp', 'local_received')
        tfields = itemgetter('uuid', 'hostname', 'timestamp',
                             'local_received', 'clock')
        taskheap = self._taskheap
        th_append = taskheap.append
        th_pop = taskheap.pop
        # Removing events from the task heap is an O(n) operation,
        # so it's easier to just account for the common number of events
        # per task (PENDING->RECEIVED->STARTED->final).
        max_events_in_heap = self.max_tasks_in_memory * self.heap_multiplier
        add_type = self._seen_types.add
        on_node_join, on_node_leave = self.on_node_join, self.on_node_leave
        tasks, Task = self.tasks, self.Task
        workers, Worker = self.workers, self.Worker
        # avoid updating LRU entry at getitem
        get_worker, get_task = workers.data.__getitem__, tasks.data.__getitem__

        def _event(event,
                   timetuple=timetuple, KeyError=KeyError,
                   insort=bisect.insort, created=True):
            self.event_count += 1
            if event_callback:
                event_callback(self, event)
            group, _, subject = event['type'].partition('-')
            try:
                handler = get_handler(group)
            except KeyError:
                pass
            else:
                return handler(subject, event), subject

            if group == 'worker':
                try:
                    hostname, timestamp, local_received = wfields(event)
                except KeyError:
                    pass
                else:
                    is_offline = subject == 'offline'
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        if is_offline:
                            worker, created = Worker(hostname), False
                        else:
                            worker = workers[hostname] = Worker(hostname)
                    worker.event(subject, timestamp, local_received, event)
                    if on_node_join and (created or subject == 'online'):
                        on_node_join(worker)
                    if on_node_leave and is_offline:
                        on_node_leave(worker)
                        workers.pop(hostname, None)
                    return (worker, created), subject
            elif group == 'task':
                (uuid, hostname, timestamp,
                 local_received, clock) = tfields(event)
                # task-sent event is sent by client, not worker
                is_client_event = subject == 'sent'
                try:
                    task, created = get_task(uuid), False
                except KeyError:
                    task = tasks[uuid] = Task(uuid, cluster_state=self)
                if is_client_event:
                    task.client = hostname
                else:
                    try:
                        worker, created = get_worker(hostname), False
                    except KeyError:
                        worker = workers[hostname] = Worker(hostname)
                    task.worker = worker
                    if worker is not None and local_received:
                        worker.event(None, local_received, timestamp)

                origin = hostname if is_client_event else worker.id

                # remove oldest event if exceeding the limit.
                heaps = len(taskheap)
                if heaps + 1 > max_events_in_heap:
                    th_pop(0)

                # most events will be dated later than the previous.
                timetup = timetuple(clock, timestamp, origin, ref(task))
                if heaps and timetup > taskheap[-1]:
                    th_append(timetup)
                else:
                    insort(taskheap, timetup)

                if subject == 'received':
                    self.task_count += 1
                task.event(subject, timestamp, local_received, event)
                task_name = task.name
                if task_name is not None:
                    add_type(task_name)
                if task.parent_id:
                    try:
                        parent_task = self.tasks[task.parent_id]
                    except KeyError:
                        self._add_pending_task_child(task)
                    else:
                        parent_task.children.add(task)
                try:
                    _children = self._tasks_to_resolve.pop(uuid)
                except KeyError:
                    pass
                else:
                    task.children.update(_children)

                return (task, created), subject
        return _event
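
    # Example (illustrative sketch): the dispatcher consumes plain event
    # dicts; the keys below are the ones the ``wfields``/``tfields``
    # itemgetters above expect (the values here are made up):
    #
    #     state.event({
    #         'type': 'task-received',
    #         'uuid': 'id1',              # any task id string
    #         'hostname': 'celery@alpha',
    #         'timestamp': time(),
    #         'local_received': time(),
    #         'clock': 1,
    #         'name': 'tasks.add',
    #     })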

    def _add_pending_task_child(self, task):
        try:
            ch = self._tasks_to_resolve[task.parent_id]
        except KeyError:
            ch = self._tasks_to_resolve[task.parent_id] = WeakSet()
        ch.add(task)

    def rebuild_taskheap(self, timetuple=timetuple):
        heap = self._taskheap[:] = [
            timetuple(t.clock, t.timestamp, t.origin, ref(t))
            for t in values(self.tasks)
        ]
        heap.sort()

    def itertasks(self, limit=None):
        for index, row in enumerate(items(self.tasks)):
            yield row
            if limit and index + 1 >= limit:
                break

    def tasks_by_time(self, limit=None, reverse=True):
        """Generator giving tasks ordered by time,
        in ``(uuid, Task)`` tuples."""
        _heap = self._taskheap
        if reverse:
            _heap = reversed(_heap)

        seen = set()
        for evtup in islice(_heap, 0, limit):
            task = evtup[3]()
            if task is not None:
                uuid = task.uuid
                if uuid not in seen:
                    yield uuid, task
                    seen.add(uuid)
    tasks_by_timestamp = tasks_by_time
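
    # Example (illustrative): iterate over the ten most recent tasks,
    # newest first:
    #
    #     for uuid, task in state.tasks_by_time(limit=10):
    #         print(uuid, task.name, task.state)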

    def tasks_by_type(self, name, limit=None, reverse=True):
        """Get all tasks by type.

        Return an iterator over ``(uuid, Task)`` tuples.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time(reverse=reverse)
             if task.name == name),
            0, limit,
        )

    def tasks_by_worker(self, hostname, limit=None, reverse=True):
        """Get all tasks by worker.

        Return an iterator over ``(uuid, Task)`` tuples.
        """
        return islice(
            ((uuid, task) for uuid, task in self.tasks_by_time(reverse=reverse)
             if task.worker.hostname == hostname),
            0, limit,
        )

    def task_types(self):
        """Return a list of all seen task types."""
        return sorted(self._seen_types)

    def alive_workers(self):
        """Return a list of (seemingly) alive workers."""
        return [w for w in values(self.workers) if w.alive]

    def __repr__(self):
        return R_STATE.format(self)

    def __reduce__(self):
        return self.__class__, (
            self.event_callback, self.workers, self.tasks, None,
            self.max_workers_in_memory, self.max_tasks_in_memory,
            self.on_node_join, self.on_node_leave,
        )