# -*- coding: utf-8 -*-
"""Trace task execution.

This module defines how the task execution is traced:
errors are recorded, handlers are applied and so on.
"""
from __future__ import absolute_import, unicode_literals

# ## ---
# This is the heart of the worker, the inner loop so to speak.
# It used to be split up into nice little classes and methods,
# but in the end it only resulted in bad performance and horrible tracebacks,
# so instead we now use one closure per task class.

import logging
import os
import sys

from collections import namedtuple
from warnings import warn

from billiard.einfo import ExceptionInfo
from kombu.exceptions import EncodeError
from kombu.serialization import loads as loads_message, prepare_accept_content
from kombu.utils.encoding import safe_repr, safe_str

from celery import current_app, group
from celery import states, signals
from celery._state import _task_stack
from celery.app import set_default_app
from celery.app.task import Task as BaseTask, Context
from celery.exceptions import Ignore, Reject, Retry, InvalidTaskError
from celery.five import monotonic
from celery.utils.log import get_logger
from celery.utils.nodenames import gethostname
from celery.utils.objects import mro_lookup
from celery.utils.saferepr import saferepr
from celery.utils.serialization import (
    get_pickleable_exception, get_pickled_exception, get_pickleable_etype,
)
from celery.utils.text import truncate

__all__ = [
    'TraceInfo', 'build_tracer', 'trace_task',
    'setup_worker_optimizations', 'reset_worker_optimizations',
]
logger = get_logger(__name__)
info = logger.info

#: Format string used to log task success.
LOG_SUCCESS = """\
Task %(name)s[%(id)s] succeeded in %(runtime)ss: %(return_value)s\
"""

#: Format string used to log task failure.
LOG_FAILURE = """\
Task %(name)s[%(id)s] %(description)s: %(exc)s\
"""

#: Format string used to log task internal error.
LOG_INTERNAL_ERROR = """\
Task %(name)s[%(id)s] %(description)s: %(exc)s\
"""

#: Format string used to log task ignored.
LOG_IGNORED = """\
Task %(name)s[%(id)s] %(description)s\
"""

#: Format string used to log task rejected.
LOG_REJECTED = """\
Task %(name)s[%(id)s] %(exc)s\
"""

#: Format string used to log task retry.
LOG_RETRY = """\
Task %(name)s[%(id)s] retry: %(exc)s\
"""

log_policy_t = namedtuple(
    'log_policy_t', ('format', 'description', 'severity', 'traceback', 'mail'),
)

log_policy_reject = log_policy_t(LOG_REJECTED, 'rejected', logging.WARN, 1, 1)
log_policy_ignore = log_policy_t(LOG_IGNORED, 'ignored', logging.INFO, 0, 0)
log_policy_internal = log_policy_t(
    LOG_INTERNAL_ERROR, 'INTERNAL ERROR', logging.CRITICAL, 1, 1,
)
log_policy_expected = log_policy_t(
    LOG_FAILURE, 'raised expected', logging.INFO, 0, 0,
)
log_policy_unexpected = log_policy_t(
    LOG_FAILURE, 'raised unexpected', logging.ERROR, 1, 1,
)

send_prerun = signals.task_prerun.send
send_postrun = signals.task_postrun.send
send_success = signals.task_success.send

STARTED = states.STARTED
SUCCESS = states.SUCCESS
IGNORED = states.IGNORED
REJECTED = states.REJECTED
RETRY = states.RETRY
FAILURE = states.FAILURE
EXCEPTION_STATES = states.EXCEPTION_STATES
IGNORE_STATES = frozenset({IGNORED, RETRY, REJECTED})

#: set by :func:`setup_worker_optimizations`
_localized = []
_patched = {}

trace_ok_t = namedtuple('trace_ok_t', ('retval', 'info', 'runtime', 'retstr'))


def task_has_custom(task, attr):
    """Return true if the task or one of its bases
    defines ``attr`` (excluding the one in BaseTask)."""
    return mro_lookup(task.__class__, attr, stop={BaseTask, object},
                      monkey_patched=['celery.app.task'])
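
# A hedged illustration of how this check is used further down (``add`` is a
# hypothetical ``@app.task``-decorated function, not defined in this module):
#
#     task_has_custom(add, '__call__')    # falsy -> tracer calls add.run
#     task_has_custom(add, 'on_success')  # falsy -> success hook is skipped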


def get_log_policy(task, einfo, exc):
    if isinstance(exc, Reject):
        return log_policy_reject
    elif isinstance(exc, Ignore):
        return log_policy_ignore
    elif einfo.internal:
        return log_policy_internal
    else:
        if task.throws and isinstance(exc, task.throws):
            return log_policy_expected
        return log_policy_unexpected
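
# Reading the branches above: a ``Reject`` is logged as a warning, an
# ``Ignore`` as info, an internal error as critical, an exception type listed
# in ``task.throws`` uses ``log_policy_expected`` (INFO, no traceback), and
# anything else falls through to ``log_policy_unexpected`` (ERROR, traceback).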


class TraceInfo(object):
    """Information about task execution."""

    __slots__ = ('state', 'retval')

    def __init__(self, state, retval=None):
        self.state = state
        self.retval = retval

    def handle_error_state(self, task, req,
                           eager=False, call_errbacks=True):
        store_errors = not eager
        if task.ignore_result:
            store_errors = task.store_errors_even_if_ignored
        return {
            RETRY: self.handle_retry,
            FAILURE: self.handle_failure,
        }[self.state](task, req,
                      store_errors=store_errors,
                      call_errbacks=call_errbacks)

    def handle_reject(self, task, req, **kwargs):
        self._log_error(task, req, ExceptionInfo())

    def handle_ignore(self, task, req, **kwargs):
        self._log_error(task, req, ExceptionInfo())

    def handle_retry(self, task, req, store_errors=True, **kwargs):
        """Handle retry exception."""
        # the exception raised is the Retry semi-predicate,
        # and its ``exc`` attribute is the original exception raised (if any).
        type_, _, tb = sys.exc_info()
        try:
            reason = self.retval
            einfo = ExceptionInfo((type_, reason, tb))
            if store_errors:
                task.backend.mark_as_retry(
                    req.id, reason.exc, einfo.traceback, request=req,
                )
            task.on_retry(reason.exc, req.id, req.args, req.kwargs, einfo)
            signals.task_retry.send(sender=task, request=req,
                                    reason=reason, einfo=einfo)
            info(LOG_RETRY, {
                'id': req.id, 'name': task.name,
                'exc': safe_repr(reason.exc),
            })
            return einfo
        finally:
            del tb

    def handle_failure(self, task, req, store_errors=True, call_errbacks=True):
        """Handle exception."""
        _, _, tb = sys.exc_info()
        try:
            exc = self.retval
            # make sure we only send pickleable exceptions back to parent.
            einfo = ExceptionInfo()
            einfo.exception = get_pickleable_exception(einfo.exception)
            einfo.type = get_pickleable_etype(einfo.type)
            task.backend.mark_as_failure(
                req.id, exc, einfo.traceback,
                request=req, store_result=store_errors,
                call_errbacks=call_errbacks,
            )
            task.on_failure(exc, req.id, req.args, req.kwargs, einfo)
            signals.task_failure.send(sender=task, task_id=req.id,
                                      exception=exc, args=req.args,
                                      kwargs=req.kwargs,
                                      traceback=tb,
                                      einfo=einfo)
            self._log_error(task, req, einfo)
            return einfo
        finally:
            del tb

    def _log_error(self, task, req, einfo):
        eobj = einfo.exception = get_pickled_exception(einfo.exception)
        exception, traceback, exc_info, sargs, skwargs = (
            safe_repr(eobj),
            safe_str(einfo.traceback),
            einfo.exc_info,
            safe_repr(req.args),
            safe_repr(req.kwargs),
        )
        policy = get_log_policy(task, einfo, eobj)

        context = {
            'hostname': req.hostname,
            'id': req.id,
            'name': task.name,
            'exc': exception,
            'traceback': traceback,
            'args': sargs,
            'kwargs': skwargs,
            'description': policy.description,
            'internal': einfo.internal,
        }

        logger.log(policy.severity, policy.format.strip(), context,
                   exc_info=exc_info if policy.traceback else None,
                   extra={'data': context})


def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                 Info=TraceInfo, eager=False, propagate=False, app=None,
                 monotonic=monotonic, truncate=truncate,
                 trace_ok_t=trace_ok_t, IGNORE_STATES=IGNORE_STATES):
    """Return a function that traces task execution.

    Catches all exceptions and updates the result backend with the
    state and result.

    If the call was successful, it saves the result to the task result
    backend, and sets the task status to `"SUCCESS"`.

    If the call raises :exc:`~@Retry`, it extracts
    the original exception, uses that as the result and sets the task state
    to `"RETRY"`.

    If the call results in an exception, it saves the exception as the task
    result, and sets the task state to `"FAILURE"`.

    Return a function that takes the following arguments:

        :param uuid: The id of the task.
        :param args: List of positional args to pass on to the function.
        :param kwargs: Keyword arguments mapping to pass on to the function.
        :keyword request: Request dict.
    """
    # If the task doesn't define a custom __call__ method
    # we optimize it away by simply calling the run method directly,
    # saving the extra method call and a line less in the stack trace.
    fun = task if task_has_custom(task, '__call__') else task.run

    loader = loader or app.loader
    backend = task.backend
    ignore_result = task.ignore_result
    track_started = task.track_started
    track_started = not eager and (task.track_started and not ignore_result)
    publish_result = not eager and not ignore_result
    hostname = hostname or gethostname()

    loader_task_init = loader.on_task_init
    loader_cleanup = loader.on_process_cleanup

    task_on_success = None
    task_after_return = None
    if task_has_custom(task, 'on_success'):
        task_on_success = task.on_success
    if task_has_custom(task, 'after_return'):
        task_after_return = task.after_return

    store_result = backend.store_result
    mark_as_done = backend.mark_as_done
    backend_cleanup = backend.process_cleanup

    pid = os.getpid()

    request_stack = task.request_stack
    push_request = request_stack.push
    pop_request = request_stack.pop
    push_task = _task_stack.push
    pop_task = _task_stack.pop
    _does_info = logger.isEnabledFor(logging.INFO)
    resultrepr_maxsize = task.resultrepr_maxsize

    prerun_receivers = signals.task_prerun.receivers
    postrun_receivers = signals.task_postrun.receivers
    success_receivers = signals.task_success.receivers

    from celery import canvas
    signature = canvas.maybe_signature  # maybe_ does not clone if already

    def on_error(request, exc, uuid, state=FAILURE, call_errbacks=True):
        if propagate:
            raise
        I = Info(state, exc)
        R = I.handle_error_state(
            task, request, eager=eager, call_errbacks=call_errbacks,
        )
        return I, R, I.state, I.retval

    def trace_task(uuid, args, kwargs, request=None):
        # R      - is the possibly prepared return value.
        # I      - is the Info object.
        # T      - runtime
        # Rstr   - textual representation of return value
        # retval - is the always unmodified return value.
        # state  - is the resulting task state.

        # This function is very long because we have unrolled all the calls
        # for performance reasons, and because the function is so long
        # we want the main variables (I, and R) to stand out visually from
        # the rest of the variables, so breaking PEP8 is worth it ;)
        R = I = T = Rstr = retval = state = None
        task_request = None
        time_start = monotonic()
        try:
            try:
                kwargs.items
            except AttributeError:
                raise InvalidTaskError(
                    'Task keyword arguments is not a mapping')
            push_task(task)
            task_request = Context(request or {}, args=args,
                                   called_directly=False, kwargs=kwargs)
            root_id = task_request.root_id or uuid
            push_request(task_request)
            try:
                # -*- PRE -*-
                if prerun_receivers:
                    send_prerun(sender=task, task_id=uuid, task=task,
                                args=args, kwargs=kwargs)
                loader_task_init(uuid, task)
                if track_started:
                    store_result(
                        uuid, {'pid': pid, 'hostname': hostname}, STARTED,
                        request=task_request,
                    )

                # -*- TRACE -*-
                try:
                    R = retval = fun(*args, **kwargs)
                    state = SUCCESS
                except Reject as exc:
                    I, R = Info(REJECTED, exc), ExceptionInfo(internal=True)
                    state, retval = I.state, I.retval
                    I.handle_reject(task, task_request)
                except Ignore as exc:
                    I, R = Info(IGNORED, exc), ExceptionInfo(internal=True)
                    state, retval = I.state, I.retval
                    I.handle_ignore(task, task_request)
                except Retry as exc:
                    I, R, state, retval = on_error(
                        task_request, exc, uuid, RETRY, call_errbacks=False)
                except Exception as exc:
                    I, R, state, retval = on_error(task_request, exc, uuid)
                except BaseException as exc:
                    raise
                else:
                    try:
                        # callback tasks must be applied before the result is
                        # stored, so that result.children is populated.

                        # groups are called inline and will store trail
                        # separately, so need to call them separately
                        # so that the trail's not added multiple times :(
                        # (Issue #1936)
                        callbacks = task.request.callbacks
                        if callbacks:
                            if len(task.request.callbacks) > 1:
                                sigs, groups = [], []
                                for sig in callbacks:
                                    sig = signature(sig, app=app)
                                    if isinstance(sig, group):
                                        groups.append(sig)
                                    else:
                                        sigs.append(sig)
                                for group_ in groups:
                                    group_.apply_async(
                                        (retval,),
                                        parent_id=uuid, root_id=root_id,
                                    )
                                if sigs:
                                    group(sigs, app=app).apply_async(
                                        (retval,),
                                        parent_id=uuid, root_id=root_id,
                                    )
                            else:
                                signature(callbacks[0], app=app).apply_async(
                                    (retval,), parent_id=uuid, root_id=root_id,
                                )

                        # execute first task in chain
                        chain = task_request.chain
                        if chain:
                            signature(chain.pop(), app=app).apply_async(
                                (retval,), chain=chain,
                                parent_id=uuid, root_id=root_id,
                            )
                        mark_as_done(
                            uuid, retval, task_request, publish_result,
                        )
                    except EncodeError as exc:
                        I, R, state, retval = on_error(task_request, exc, uuid)
                    else:
                        if task_on_success:
                            task_on_success(retval, uuid, args, kwargs)
                        if success_receivers:
                            send_success(sender=task, result=retval)
                        if _does_info:
                            T = monotonic() - time_start
                            Rstr = saferepr(R, resultrepr_maxsize)
                            info(LOG_SUCCESS, {
                                'id': uuid, 'name': name,
                                'return_value': Rstr, 'runtime': T,
                            })

                # -* POST *-
                if state not in IGNORE_STATES:
                    if task_after_return:
                        task_after_return(
                            state, retval, uuid, args, kwargs, None,
                        )
            finally:
                try:
                    if postrun_receivers:
                        send_postrun(sender=task, task_id=uuid, task=task,
                                     args=args, kwargs=kwargs,
                                     retval=retval, state=state)
                finally:
                    pop_task()
                    pop_request()
                    if not eager:
                        try:
                            backend_cleanup()
                            loader_cleanup()
                        except (KeyboardInterrupt, SystemExit, MemoryError):
                            raise
                        except Exception as exc:
                            logger.error('Process cleanup failed: %r', exc,
                                         exc_info=True)
        except MemoryError:
            raise
        except Exception as exc:
            if eager:
                raise
            R = report_internal_error(task, exc)
            if task_request is not None:
                I, _, _, _ = on_error(task_request, exc, uuid)
        return trace_ok_t(R, I, T, Rstr)

    return trace_task
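
# A minimal usage sketch for the closure returned above, assuming ``app`` is a
# configured Celery app and ``add`` a registered task (both hypothetical; the
# worker normally builds and caches this callable as ``task.__trace__``):
#
#     tracer = build_tracer(add.name, add, app=app, eager=True)
#     retval, info, runtime, retstr = tracer('task-id', (2, 2), {}, request={})
#
# On success ``info`` is None and ``retval`` holds the task's return value; on
# failure ``info`` is a TraceInfo and ``retval`` an ExceptionInfo.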


def trace_task(task, uuid, args, kwargs, request={}, **opts):
    try:
        if task.__trace__ is None:
            task.__trace__ = build_tracer(task.name, task, **opts)
        return task.__trace__(uuid, args, kwargs, request)
    except Exception as exc:
        return trace_ok_t(report_internal_error(task, exc), None, 0.0, None)


def _trace_task_ret(name, uuid, request, body, content_type,
                    content_encoding, loads=loads_message, app=None,
                    **extra_request):
    app = app or current_app._get_current_object()
    embed = None
    if content_type:
        accept = prepare_accept_content(app.conf.accept_content)
        args, kwargs, embed = loads(
            body, content_type, content_encoding, accept=accept,
        )
    else:
        args, kwargs, embed = body
    hostname = gethostname()
    request.update({
        'args': args, 'kwargs': kwargs,
        'hostname': hostname, 'is_eager': False,
    }, **embed or {})
    R, I, T, Rstr = trace_task(app.tasks[name],
                               uuid, args, kwargs, request, app=app)
    return (1, R, T) if I else (0, Rstr, T)


trace_task_ret = _trace_task_ret


def _fast_trace_task(task, uuid, request, body, content_type,
                     content_encoding, loads=loads_message, _loc=_localized,
                     hostname=None, **_):
    embed = None
    tasks, accept, hostname = _loc
    if content_type:
        args, kwargs, embed = loads(
            body, content_type, content_encoding, accept=accept,
        )
    else:
        args, kwargs, embed = body
    request.update({
        'args': args, 'kwargs': kwargs,
        'hostname': hostname, 'is_eager': False,
    }, **embed or {})
    R, I, T, Rstr = tasks[task].__trace__(
        uuid, args, kwargs, request,
    )
    return (1, R, T) if I else (0, Rstr, T)
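
# Both tracing entry points above return a 3-tuple for the worker: the first
# element is 1 when the trace produced an error Info and 0 when it did not,
# the second is the ExceptionInfo (error) or the truncated repr ``Rstr``
# (success), and the third is the measured runtime (None when it was not
# measured, e.g. with INFO logging disabled).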


def report_internal_error(task, exc):
    _type, _value, _tb = sys.exc_info()
    try:
        _value = task.backend.prepare_exception(exc, 'pickle')
        exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
        warn(RuntimeWarning(
            'Exception raised outside body: {0!r}:\n{1}'.format(
                exc, exc_info.traceback)))
        return exc_info
    finally:
        del _tb


def setup_worker_optimizations(app, hostname=None):
    global trace_task_ret

    hostname = hostname or gethostname()

    # make sure custom Task.__call__ methods that call super
    # will not mess up the request/task stack.
    _install_stack_protection()

    # all new threads start without a current app, so if an app is not
    # passed on to the thread it will fall back to the "default app",
    # which then could be the wrong app. So for the worker
    # we set this to always return our app. This is a hack,
    # and means that only a single app can be used for workers
    # running in the same process.
    app.set_current()
    set_default_app(app)

    # evaluate all task classes by finalizing the app.
    app.finalize()

    # set fast shortcut to task registry
    _localized[:] = [
        app._tasks,
        prepare_accept_content(app.conf.accept_content),
        hostname,
    ]

    trace_task_ret = _fast_trace_task
    from celery.worker import request as request_module
    request_module.trace_task_ret = _fast_trace_task
    request_module.__optimize__()


def reset_worker_optimizations():
    global trace_task_ret
    trace_task_ret = _trace_task_ret
    try:
        delattr(BaseTask, '_stackprotected')
    except AttributeError:
        pass
    try:
        BaseTask.__call__ = _patched.pop('BaseTask.__call__')
    except KeyError:
        pass
    from celery.worker import request as request_module
    request_module.trace_task_ret = _trace_task_ret
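
# A hedged sketch of the intended pairing (illustrative only; the actual call
# sites live in the worker, not in this module):
#
#     setup_worker_optimizations(app, hostname=gethostname())
#     try:
#         ...  # worker consumes and executes tasks via trace_task_ret
#     finally:
#         reset_worker_optimizations()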


def _install_stack_protection():
    # Patches BaseTask.__call__ in the worker to handle the edge case
    # where people override it and also call super.
    #
    # - The worker optimizes away BaseTask.__call__ and instead
    #   calls task.run directly.
    # - so with the addition of current_task and the request stack
    #   BaseTask.__call__ now pushes to those stacks so that
    #   they work when tasks are called directly.
    #
    # The worker only optimizes away __call__ in the case
    # where it has not been overridden, so the request/task stack
    # will blow if a custom task class defines __call__ and also
    # calls super().
    if not getattr(BaseTask, '_stackprotected', False):
        _patched['BaseTask.__call__'] = orig = BaseTask.__call__

        def __protected_call__(self, *args, **kwargs):
            stack = self.request_stack
            req = stack.top
            if req and not req._protected and \
                    len(stack) == 1 and not req.called_directly:
                req._protected = 1
                return self.run(*args, **kwargs)
            return orig(self, *args, **kwargs)
        BaseTask.__call__ = __protected_call__
        BaseTask._stackprotected = True
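
# The edge case the patch above guards against, sketched with a hypothetical
# task class (not part of this module):
#
#     class MyTask(app.Task):
#         def __call__(self, *args, **kwargs):
#             do_extra_setup()  # hypothetical helper
#             return super(MyTask, self).__call__(*args, **kwargs)
#
# The tracer has already pushed the real request before invoking
# ``MyTask.__call__``, so when the ``super()`` call reaches
# ``__protected_call__`` it finds that request on top of the stack, marks it
# protected and calls ``self.run`` directly instead of falling back to the
# original ``BaseTask.__call__`` (which would push to the stacks a second
# time).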