bench_worker.py

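"""Benchmark Celery task throughput.

Usage:
    bench_worker.py apply [n]  -- publish n bench tasks (default: 40000)
    bench_worker.py work  [n]  -- start a worker and consume them
    bench_worker.py both  [n]  -- publish, then consume
"""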
from __future__ import absolute_import, print_function, unicode_literals

import os
import sys

from kombu.five import monotonic  # noqa

from celery import Celery  # noqa
from celery.five import range  # noqa

os.environ.update(
    NOSETPS='yes',
    USE_FAST_LOCALS='yes',
)

DEFAULT_ITS = 40000

BROKER_TRANSPORT = os.environ.get('BROKER', 'librabbitmq://')
if hasattr(sys, 'pypy_version_info'):
    # librabbitmq is a C extension and is not available on PyPy,
    # so fall back to the pure-Python py-amqp transport.
    BROKER_TRANSPORT = 'pyamqp://'

app = Celery('bench_worker')
app.conf.update(
    broker_url=BROKER_TRANSPORT,
    broker_pool_limit=10,
    worker_pool='solo',
    worker_prefetch_multiplier=0,
    task_default_delivery_mode=1,  # transient (non-persistent) messages
    task_queues={
        'bench.worker': {
            'exchange': 'bench.worker',
            'routing_key': 'bench.worker',
            'no_ack': True,
            'exchange_durable': False,
            'queue_durable': False,
            'auto_delete': True,
        }
    },
    task_serializer='json',
    task_default_queue='bench.worker',
    result_backend=None,
)


def tdiff(then):
    return monotonic() - then
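

# The bench task: each call bumps a counter stored on the task object
# (it.cur); every 5000 calls it prints intermediate timing to stderr, and
# once the counter reaches the last of the `n` expected messages it prints
# the totals and hard-exits the worker process.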
@app.task(cur=0, time_start=None, queue='bench.worker', bare=True)
def it(_, n):
    # use internal counter, as ordering can be skewed
    # by previous runs, or the broker.
    i = it.cur
    if i and not i % 5000:
        print('({0} so far: {1}s)'.format(i, tdiff(it.subt)), file=sys.stderr)
        it.subt = monotonic()
    if not i:
        it.subt = it.time_start = monotonic()
    elif i > n - 2:
        total = tdiff(it.time_start)
        print('({0} so far: {1}s)'.format(i, tdiff(it.subt)), file=sys.stderr)
        print('-- process {0} tasks: {1}s total, {2} tasks/s'.format(
            n, total, n / (total + .0),
        ))
        os._exit(0)
    it.cur += 1
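

# bench_apply: publish `n` bench tasks as fast as possible, reusing a single
# producer acquired from the pool, and report the total time spent
# publishing.  The list comprehension is used only for its side effect of
# sending the messages.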
def bench_apply(n=DEFAULT_ITS):
    time_start = monotonic()
    task = it._get_current_object()
    with app.producer_or_acquire() as producer:
        [task.apply_async((i, n), producer=producer) for i in range(n)]
    print('-- apply {0} tasks: {1}s'.format(n, monotonic() - time_start))
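

# bench_work: start an embedded worker consuming from the bench.worker
# queue; the process exits from inside the `it` task once all messages have
# been processed.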
def bench_work(n=DEFAULT_ITS, loglevel='CRITICAL'):
    loglevel = os.environ.get('BENCH_LOGLEVEL') or loglevel
    if loglevel:
        app.log.setup_logging_subsystem(loglevel=loglevel)
    worker = app.WorkController(concurrency=15,
                                queues=['bench.worker'])
    try:
        print('-- starting worker')
        worker.start()
    except SystemExit:
        raise
    assert sum(worker.state.total_count.values()) == n + 1


def bench_both(n=DEFAULT_ITS):
    bench_apply(n)
    bench_work(n)


def main(argv=sys.argv):
    n = DEFAULT_ITS
    if len(argv) < 2:
        print('Usage: {0} [apply|work|both] [n={1}]'.format(
            os.path.basename(argv[0]), DEFAULT_ITS,
        ))
        return sys.exit(1)
    try:
        n = int(argv[2])
    except IndexError:
        pass
    return {'apply': bench_apply,
            'work': bench_work,
            'both': bench_both}[argv[1]](n=n)


if __name__ == '__main__':
    main()