celeryd.py

# -*- coding: utf-8 -*-
"""celeryd

.. program:: celeryd

.. cmdoption:: -c, --concurrency

    Number of child processes processing the queue. The default
    is the number of CPUs available on your system.

.. cmdoption:: -f, --logfile

    Path to log file. If no logfile is specified, `stderr` is used.

.. cmdoption:: -l, --loglevel

    Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
    `ERROR`, `CRITICAL`, or `FATAL`.

.. cmdoption:: -n, --hostname

    Set custom hostname.

.. cmdoption:: -B, --beat

    Also run the `celerybeat` periodic task scheduler. Please note that
    there must only be one instance of this service.

.. cmdoption:: -Q, --queues

    List of queues to enable for this worker, separated by comma.
    By default all configured queues are enabled.
    Example: `-Q video,image`

.. cmdoption:: -I, --include

    Comma separated list of additional modules to import.
    Example: -I foo.tasks,bar.tasks

.. cmdoption:: -s, --schedule

    Path to the schedule database if running with the `-B` option.
    Defaults to `celerybeat-schedule`. The extension ".db" will be
    appended to the filename.

.. cmdoption:: --scheduler

    Scheduler class to use. Default is celery.beat.PersistentScheduler

.. cmdoption:: -E, --events

    Send events that can be captured by monitors like `celerymon`.

.. cmdoption:: --purge, --discard

    Discard all waiting tasks before the daemon is started.
    **WARNING**: This is unrecoverable, and the tasks will be
    deleted from the messaging server.

.. cmdoption:: --time-limit

    Enables a hard time limit (in seconds) for tasks.

.. cmdoption:: --soft-time-limit

    Enables a soft time limit (in seconds) for tasks.

.. cmdoption:: --maxtasksperchild

    Maximum number of tasks a pool worker can execute before it's
    terminated and replaced by a new worker.

"""
from __future__ import absolute_import

import sys

try:
    from multiprocessing import freeze_support
except ImportError:                 # pragma: no cover
    freeze_support = lambda: True   # noqa

from celery.bin.base import Command, Option


class WorkerCommand(Command):
    namespace = "celeryd"
    enable_config_from_cmdline = True
    supports_args = False

    def run(self, *args, **kwargs):
        kwargs.pop("app", None)
        # Pools like eventlet/gevent need to patch libs as early
        # as possible.
        from celery import concurrency
        kwargs["pool"] = concurrency.get_implementation(
                    kwargs.get("pool") or self.app.conf.CELERYD_POOL)
        return self.app.Worker(**kwargs).run()

    def get_options(self):
        conf = self.app.conf
        return (
            Option('-c', '--concurrency',
                default=conf.CELERYD_CONCURRENCY,
                action="store", dest="concurrency", type="int",
                help="Number of worker threads/processes"),
            Option('-P', '--pool',
                default=conf.CELERYD_POOL,
                action="store", dest="pool", type="str",
                help="Pool implementation: "
                     "processes (default), eventlet, gevent, "
                     "solo or threads."),
            Option('--purge', '--discard', default=False,
                action="store_true", dest="discard",
                help="Discard all waiting tasks before the server is "
                     "started. WARNING: There is no undo operation "
                     "and the tasks will be deleted."),
            Option('-f', '--logfile', default=conf.CELERYD_LOG_FILE,
                action="store", dest="logfile",
                help="Path to log file."),
            Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL,
                action="store", dest="loglevel",
                help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL"),
            Option('-n', '--hostname', default=None,
                action="store", dest="hostname",
                help="Set custom host name. E.g. 'foo.example.com'."),
            Option('-B', '--beat', default=False,
                action="store_true", dest="run_clockservice",
                help="Also run the celerybeat periodic task scheduler. "
                     "NOTE: Only one instance of celerybeat must be "
                     "running at any one time."),
            Option('-s', '--schedule',
                default=conf.CELERYBEAT_SCHEDULE_FILENAME,
                action="store", dest="schedule",
                help="Path to the schedule database if running with the -B "
                     "option. The extension '.db' will be appended to the "
                     "filename. Default: %s" % (
                        conf.CELERYBEAT_SCHEDULE_FILENAME, )),
            Option('--scheduler',
                default=None,
                action="store", dest="scheduler_cls",
                help="Scheduler class. Default is "
                     "celery.beat.PersistentScheduler"),
            Option('-S', '--statedb', default=conf.CELERYD_STATE_DB,
                action="store", dest="db",
                help="Path to the state database. The extension '.db' will "
                     "be appended to the filename. Default: %s" % (
                        conf.CELERYD_STATE_DB, )),
            Option('-E', '--events', default=conf.CELERY_SEND_EVENTS,
                action="store_true", dest="events",
                help="Send events so the worker can be monitored by "
                     "celeryev, celerymon and other monitors."),
            Option('--time-limit',
                default=conf.CELERYD_TASK_TIME_LIMIT,
                action="store", type="int", dest="task_time_limit",
                help="Enables a hard time limit (in seconds) for tasks."),
            Option('--soft-time-limit',
                default=conf.CELERYD_TASK_SOFT_TIME_LIMIT,
                action="store", type="int", dest="task_soft_time_limit",
                help="Enables a soft time limit (in seconds) for tasks."),
            Option('--maxtasksperchild',
                default=conf.CELERYD_MAX_TASKS_PER_CHILD,
                action="store", type="int", dest="max_tasks_per_child",
                help="Maximum number of tasks a pool worker can execute "
                     "before it's terminated and replaced by a new worker."),
            Option('--queues', '-Q', default=[],
                action="store", dest="queues",
                help="Comma separated list of queues to consume from. "
                     "By default all configured queues are used. "
                     "Example: -Q video,image"),
            Option('--include', '-I', default=[],
                action="store", dest="include",
                help="Comma separated list of additional modules to import. "
                     "Example: -I foo.tasks,bar.tasks"),
            Option('--pidfile', default=None,
                help="Optional file used to store the worker's pid. "
                     "The worker will not start if this file already exists "
                     "and the pid is still alive."),
            Option('--autoscale', default=None,
                help="Enable autoscaling by providing "
                     "max_concurrency,min_concurrency. Example: "
                     "--autoscale=10,3 (always keep 3 processes, "
                     "but grow to 10 if necessary)."),
        )
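
# The option defaults above come straight from the app configuration
# (the CELERYD_*, CELERYBEAT_* and CELERY_* settings referenced in
# get_options), so command line arguments only override what is already
# configured.  A minimal sketch of a configuration module that would feed
# those defaults (the setting names are the ones used above; the values are
# illustrative only):
#
#   # celeryconfig.py (hypothetical example)
#   CELERYD_CONCURRENCY = 4
#   CELERYD_POOL = "processes"
#   CELERYD_TASK_TIME_LIMIT = 300
#   CELERYBEAT_SCHEDULE_FILENAME = "celerybeat-schedule"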


def main():
    freeze_support()
    worker = WorkerCommand()
    worker.execute_from_commandline()


def windows_main():
    sys.stderr.write("""
The celeryd command does not work on Windows.
Instead, please use:

    ..> python -m celery.bin.celeryd

You can also supply arguments:

    ..> python -m celery.bin.celeryd --concurrency=10 --loglevel=DEBUG
""".strip())


if __name__ == "__main__":          # pragma: no cover
    main()
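
# A minimal sketch of driving the worker programmatically, assuming a
# configured default app.  The argv list mirrors what the shell would pass
# and is illustrative only; argument handling can differ between Celery
# versions:
#
#   from celery.bin.celeryd import WorkerCommand
#   WorkerCommand().execute_from_commandline(
#       ["celeryd", "--loglevel=INFO", "--concurrency=2"])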