# bench_worker.py

# Benchmark setup: configure the environment and a throwaway Celery app
# tuned for raw throughput (transient, auto-deleting, no-ack queues;
# rate limits disabled; non-persistent delivery).
import os
import sys
import time

# Set before the celery/anyjson imports are processed; presumably disables
# setproctitle updates during the benchmark — TODO confirm what reads this.
os.environ["NOSETPS"] = "yes"

import anyjson

# Allow forcing a specific JSON implementation via the JSONIMP env var.
JSONIMP = os.environ.get("JSONIMP")
if JSONIMP:
    anyjson.force_implementation(JSONIMP)
print("anyjson implementation: %r" % (anyjson.implementation.name, ))

from celery import Celery, group

# Default number of tasks published/consumed per benchmark run.
DEFAULT_ITS = 20000

# librabbitmq is a C extension that PyPy cannot load; fall back to the
# pure-Python amqplib transport there.
BROKER_TRANSPORT = "librabbitmq"
if hasattr(sys, "pypy_version_info"):
    BROKER_TRANSPORT = "amqplib"

celery = Celery(__name__)
celery.conf.update(BROKER_TRANSPORT=BROKER_TRANSPORT,
                   BROKER_POOL_LIMIT=10,
                   CELERYD_POOL="solo",
                   CELERY_PREFETCH_MULTIPLIER=0,
                   CELERY_DISABLE_RATE_LIMITS=True,
                   # Delivery mode 1 = non-persistent messages: faster,
                   # but lost if the broker restarts (fine for a benchmark).
                   CELERY_DEFAULT_DELIVERY_MODE=1,
                   CELERY_QUEUES = {
                       "bench.worker": {
                           "exchange": "bench.worker",
                           "routing_key": "bench.worker",
                           "no_ack": True,
                           "exchange_durable": False,
                           "queue_durable": False,
                           "auto_delete": True,
                       }
                   },
                   CELERY_TASK_SERIALIZER="json",
                   CELERY_DEFAULT_QUEUE="bench.worker",
                   # No result backend: results are not needed and storing
                   # them would skew the timings.
                   CELERY_BACKEND=None,
                   )#CELERY_MESSAGE_COMPRESSION="zlib")
  36. def tdiff(then):
  37. return time.time() - then
  38. @celery.task(cur=0, time_start=None, queue="bench.worker")
  39. def it(_, n):
  40. i = it.cur # use internal counter, as ordering can be skewed
  41. # by previous runs, or the broker.
  42. if i and not i % 5000:
  43. print >> sys.stderr, "(%s so far: %ss)" % (i, tdiff(it.subt))
  44. it.subt = time.time()
  45. if not i:
  46. it.subt = it.time_start = time.time()
  47. elif i == n - 1:
  48. total = tdiff(it.time_start)
  49. print >> sys.stderr, "(%s so far: %ss)" % (i, tdiff(it.subt))
  50. print("-- process %s tasks: %ss total, %s tasks/s} " % (
  51. n, total, n / (total + .0)))
  52. sys.exit()
  53. it.cur += 1
  54. def bench_apply(n=DEFAULT_ITS):
  55. time_start = time.time()
  56. group(it.s(i, n) for i in xrange(n))()
  57. print("-- apply %s tasks: %ss" % (n, time.time() - time_start, ))
def bench_work(n=DEFAULT_ITS, loglevel="CRITICAL"):
    # Run an embedded worker consuming the bench.worker queue; the `it`
    # task calls sys.exit() after n tasks, which surfaces here as SystemExit.
    loglevel = os.environ.get("BENCH_LOGLEVEL") or loglevel
    if loglevel:
        celery.log.setup_logging_subsystem(loglevel=loglevel)
    worker = celery.WorkController(concurrency=15,
                                   queues=["bench.worker"])
    try:
        print("STARTING WORKER")
        worker.start()  # blocks until the benchmark task raises SystemExit
    except SystemExit:
        raise
        # NOTE(review): unreachable — this line sits after `raise`, so the
        # sanity check on the processed-task count never runs; it looks
        # like a leftover/misplaced assertion — confirm intent upstream.
        assert sum(worker.state.total_count.values()) == n + 1
  70. def bench_both(n=DEFAULT_ITS):
  71. bench_apply(n)
  72. bench_work(n)
  73. def main(argv=sys.argv):
  74. n = DEFAULT_ITS
  75. if len(argv) < 2:
  76. print("Usage: %s [apply|work|both] [n=20k]" % (
  77. os.path.basename(argv[0]), ))
  78. return sys.exit(1)
  79. try:
  80. try:
  81. n = int(argv[2])
  82. except IndexError:
  83. pass
  84. return {"apply": bench_apply,
  85. "work": bench_work,
  86. "both": bench_both}[argv[1]](n=n)
  87. except:
  88. raise
# Script entry point: bench_worker.py [apply|work|both] [n]
if __name__ == "__main__":
    main()