# bench_worker.py — Celery publish/consume micro-benchmark.
import os
import sys
import time

# Must be set before Celery is imported: presumably tells celery's platform
# setup to skip setting the process title for this run — TODO confirm.
os.environ["NOSETPS"] = "yes"

from threading import Lock
  6. class DLock(object):
  7. def __init__(self):
  8. self.l = Lock()
  9. def acquire(self, *args, **kwargs):
  10. print("ACQUIRE: %r %r" % (args, kwargs))
  11. import traceback
  12. traceback.print_stack()
  13. return self.l.acquire(*args, **kwargs)
  14. def release(self):
  15. print("RELEASE")
  16. return self.l.release()
  17. def __enter__(self):
  18. self.acquire()
  19. return self
  20. def __exit__(self, *exc_info):
  21. self.release()
import threading
# Uncomment to route all lock traffic through the DLock debug wrapper.
#threading.Lock = DLock

import anyjson

# Allow overriding the JSON backend via the environment
# (e.g. JSONIMP=simplejson) so different implementations can be benchmarked.
JSONIMP = os.environ.get("JSONIMP")
if JSONIMP:
    anyjson.force_implementation(JSONIMP)

print("anyjson implementation: %r" % (anyjson.implementation.name, ))
from celery import Celery

# Default number of benchmark tasks per run.
DEFAULT_ITS = 20000

celery = Celery(__name__)
# Benchmark-oriented configuration: single dedicated queue with no acks,
# no rate limits, unlimited prefetch, and no result backend, so broker
# round-trips dominate as little as possible.
celery.conf.update(BROKER_TRANSPORT="librabbitmq",
                   BROKER_POOL_LIMIT=10,
                   CELERYD_POOL="solo",
                   CELERY_PREFETCH_MULTIPLIER=0,
                   CELERY_DISABLE_RATE_LIMITS=True,
                   #CELERY_DEFAULT_DELIVERY_MODE="transient",
                   CELERY_QUEUES = {
                       "bench.worker": {
                           "exchange": "bench.worker",
                           "routing_key": "bench.worker",
                           "no_ack": True,
                       }
                   },
                   CELERY_TASK_SERIALIZER="json",
                   CELERY_DEFAULT_QUEUE="bench.worker",
                   CELERY_BACKEND=None,
)#CELERY_MESSAGE_COMPRESSION="zlib")
  49. def tdiff(then):
  50. return time.time() - then
  51. @celery.task(cur=0, time_start=None, queue="bench.worker")
  52. def it(_, n):
  53. i = it.cur # use internal counter, as ordering can be skewed
  54. # by previous runs, or the broker.
  55. if i and not i % 5000:
  56. print >> sys.stderr, "(%s so far: %ss)" % (i, tdiff(it.subt))
  57. it.subt = time.time()
  58. if not i:
  59. it.subt = it.time_start = time.time()
  60. elif i == n - 1:
  61. total = tdiff(it.time_start)
  62. print >> sys.stderr, "(%s so far: %ss)" % (i, tdiff(it.subt))
  63. print("-- process %s tasks: %ss total, %s tasks/s} " % (
  64. n, total, n / (total + .0)))
  65. sys.exit()
  66. it.cur += 1
  67. def bench_apply(n=DEFAULT_ITS):
  68. time_start = time.time()
  69. celery.TaskSet(it.subtask((i, n)) for i in xrange(n)).apply_async()
  70. print("-- apply %s tasks: %ss" % (n, time.time() - time_start, ))
  71. def bench_work(n=DEFAULT_ITS, loglevel=None):
  72. loglevel = os.environ.get("BENCH_LOGLEVEL") or loglevel
  73. if loglevel:
  74. celery.log.setup_logging_subsystem(loglevel=loglevel)
  75. worker = celery.WorkController(concurrency=15,
  76. queues=["bench.worker"])
  77. try:
  78. print("STARTING WORKER")
  79. worker.start()
  80. except SystemExit:
  81. assert sum(worker.state.total_count.values()) == n + 1
  82. def bench_both(n=DEFAULT_ITS):
  83. bench_apply(n)
  84. bench_work(n)
  85. def main(argv=sys.argv):
  86. n = DEFAULT_ITS
  87. if len(argv) < 2:
  88. print("Usage: %s [apply|work|both] [n=20k]" % (
  89. os.path.basename(argv[0]), ))
  90. return sys.exit(1)
  91. try:
  92. try:
  93. n = int(argv[2])
  94. except IndexError:
  95. pass
  96. return {"apply": bench_apply,
  97. "work": bench_work,
  98. "both": bench_both}[argv[1]](n=n)
  99. except KeyboardInterrupt:
  100. pass
# Script entry point: dispatch to main() when run directly.
if __name__ == "__main__":
    main()