Browse Source

Merge remote branch 'upstream/master'

Conflicts:
	celery/db/session.py
rnoel 13 years ago
Commit
bd1e7e9d59
43 changed files with 649 additions and 206 deletions
   1. AUTHORS (+1, -0)
   2. celery/app/__init__.py (+2, -1)
   3. celery/app/amqp.py (+14, -32)
   4. celery/app/base.py (+2, -1)
   5. celery/apps/worker.py (+6, -11)
   6. celery/backends/amqp.py (+2, -1)
   7. celery/backends/base.py (+13, -3)
   8. celery/bin/celerybeat.py (+2, -1)
   9. celery/bin/celeryd_detach.py (+12, -2)
  10. celery/bin/celeryd_multi.py (+7, -3)
  11. celery/bin/celeryev.py (+2, -1)
  12. celery/db/session.py (+1, -1)
  13. celery/events/__init__.py (+2, -1)
  14. celery/events/state.py (+2, -1)
  15. celery/result.py (+2, -1)
  16. celery/task/__init__.py (+2, -2)
  17. celery/task/control.py (+6, -1)
  18. celery/task/sets.py (+2, -1)
  19. celery/tests/test_app/test_app_amqp.py (+1, -1)
  20. celery/tests/test_app/test_app_defaults.py (+2, -1)
  21. celery/tests/test_backends/test_redis.py (+2, -1)
  22. celery/tests/test_worker/test_worker.py (+5, -3)
  23. celery/tests/test_worker/test_worker_control.py (+8, -0)
  24. celery/tests/utils.py (+0, -1)
  25. celery/utils/__init__.py (+2, -1)
  26. celery/utils/timer2.py (+2, -1)
  27. celery/worker/__init__.py (+3, -1)
  28. celery/worker/autoscale.py (+63, -18)
  29. celery/worker/buckets.py (+2, -1)
  30. celery/worker/consumer.py (+5, -3)
  31. celery/worker/control/builtins.py (+34, -12)
  32. contrib/debian/README.rst (+2, -0)
  33. contrib/debian/init.d-deprecated/celerybeat (renamed)
  34. contrib/debian/init.d-deprecated/celeryd (renamed)
  35. contrib/debian/init.d-deprecated/celeryevcam (renamed)
  36. contrib/generic-init.d/celerybeat (+206, -0)
  37. contrib/generic-init.d/celeryevcam (+217, -0)
  38. docs/cookbook/daemonizing.rst (+6, -93)
  39. docs/internals/protocol.rst (+2, -2)
  40. docs/userguide/tasks.rst (+4, -0)
  41. requirements/default.txt (+1, -1)
  42. setup.cfg (+1, -1)
  43. setup.py (+1, -1)

+ 1 - 0
AUTHORS

@@ -77,3 +77,4 @@ Ordered by date of first contribution:
   Mauro Rocco <fireantology@gmail.com>
   Matthew J Morrison <mattj.morrison@gmail.com>
   Daniel Watkins <daniel@daniel-watkins.co.uk>
+  rnoel <rnoel@ltutech.com>

+ 2 - 1
celery/app/__init__.py

@@ -90,7 +90,8 @@ class App(base.BaseApp):
     def TaskSet(self, *args, **kwargs):
         """Create new :class:`~celery.task.sets.TaskSet`."""
         from celery.task.sets import TaskSet
-        return TaskSet(*args, app=self, **kwargs)
+        kwargs["app"] = self
+        return TaskSet(*args, **kwargs)

     def worker_main(self, argv=None):
         """Run :program:`celeryd` using `argv`.  Uses :data:`sys.argv`

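For context, a minimal sketch of the keyword collision the new pattern avoids (names are hypothetical, plain Python semantics):

    def make(**kwargs):
        return kwargs

    opts = {"app": "caller_app"}
    # make(app="self_app", **opts) would raise:
    #   TypeError: make() got multiple values for keyword argument 'app'
    opts["app"] = "self_app"    # the pattern used above avoids the collision
    make(**opts)                # OK
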
+ 14 - 32
celery/app/amqp.py

@@ -12,14 +12,13 @@ AMQ related functionality.
 from datetime import datetime, timedelta

 from kombu import BrokerConnection, Exchange
-from kombu.connection import Resource
 from kombu import compat as messaging
+from kombu.pools import ProducerPool
 from kombu.utils import cached_property

 from celery import routes as _routes
 from celery import signals
 from celery.utils import gen_unique_id, textindent
-from celery.utils import promise, maybe_promise

 #: List of known options to a Kombu producers send method.
 #: Used to extract the message related options out of any `dict`.
@@ -90,6 +89,8 @@ class Queues(dict):
     def format(self, indent=0, indent_first=True):
         """Format routing table into string for log dumps."""
         active = self.consume_from
+        if not active:
+            return ""
         info = [QUEUE_FORMAT.strip() % dict(
                     name=(name + ":").ljust(12), **config)
                         for name, config in sorted(active.iteritems())]
@@ -132,6 +133,8 @@ class Queues(dict):
     def with_defaults(cls, queues, default_exchange, default_exchange_type):
         """Alternate constructor that adds default exchange and
         exchange type information to queues that does not have any."""
+        if queues is None:
+            queues = {}
         for opts in queues.values():
             opts.setdefault("exchange", default_exchange),
             opts.setdefault("exchange_type", default_exchange_type)
@@ -248,40 +251,19 @@ class TaskPublisher(messaging.Publisher):
             self.close()


-class PublisherPool(Resource):
+class PublisherPool(ProducerPool):

-    def __init__(self, app=None):
+    def __init__(self, app):
         self.app = app
-        super(PublisherPool, self).__init__(limit=self.app.pool.limit)
+        super(PublisherPool, self).__init__(self.app.pool,
+                                            limit=self.app.pool.limit)

-    def create_publisher(self):
-        conn = self.app.pool.acquire(block=True)
+    def create_producer(self):
+        conn = self.connections.acquire(block=True)
         pub = self.app.amqp.TaskPublisher(conn, auto_declare=False)
-        conn._publisher_chan = pub.channel
-        return pub
-
-    def new(self):
-        return promise(self.create_publisher)
-
-    def setup(self):
-        if self.limit:
-            for _ in xrange(self.limit):
-                self._resource.put_nowait(self.new())
-
-    def prepare(self, publisher):
-        pub = maybe_promise(publisher)
-        if not pub.connection:
-            pub.connection = self.app.pool.acquire(block=True)
-            if not getattr(pub.connection, "_publisher_chan", None):
-                pub.connection._publisher_chan = pub.connection.channel()
-            pub.revive(pub.connection._publisher_chan)
+        conn._producer_chan = pub.channel
         return pub

-    def release(self, resource):
-        resource.connection.release()
-        resource.connection = None
-        super(PublisherPool, self).release(resource)
-

 class AMQP(object):
     BrokerConnection = BrokerConnection
@@ -302,7 +284,7 @@ class AMQP(object):
         """Create new :class:`Queues` instance, using queue defaults
         """Create new :class:`Queues` instance, using queue defaults
         from the current configuration."""
         from the current configuration."""
         conf = self.app.conf
         conf = self.app.conf
-        if not queues:
+        if not queues and conf.CELERY_DEFAULT_QUEUE:
             queues = {conf.CELERY_DEFAULT_QUEUE: {
             queues = {conf.CELERY_DEFAULT_QUEUE: {
                         "exchange": conf.CELERY_DEFAULT_EXCHANGE,
                         "exchange": conf.CELERY_DEFAULT_EXCHANGE,
                         "exchange_type": conf.CELERY_DEFAULT_EXCHANGE_TYPE,
                         "exchange_type": conf.CELERY_DEFAULT_EXCHANGE_TYPE,
@@ -367,4 +349,4 @@ class AMQP(object):

     @cached_property
     def publisher_pool(self):
-        return PublisherPool(app=self.app)
+        return PublisherPool(self.app)

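For context, a rough usage sketch of the reworked pool; `app_or_default` is the stock helper, and the actual publishing step is elided:

    from celery.app import app_or_default

    app = app_or_default()
    # Acquire a TaskPublisher backed by a pooled connection and channel.
    publisher = app.amqp.publisher_pool.acquire(block=True)
    try:
        pass    # publish task messages here
    finally:
        publisher.release()    # hand publisher and connection back to the pool
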
+ 2 - 1
celery/app/base.py

@@ -8,7 +8,8 @@ Application Base Class.
 :license: BSD, see LICENSE for more details.

 """
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import platform as _platform
 import sys

+ 6 - 11
celery/apps/worker.py

@@ -281,12 +281,10 @@ def install_worker_int_handler(worker):
     def _stop(signum, frame):
         process_name = get_process_name()
         if not process_name or process_name == "MainProcess":
-            worker.logger.warn(
-                "celeryd: Hitting Ctrl+C again will terminate "
-                "all running tasks!")
+            print("celeryd: Hitting Ctrl+C again will terminate "
+                  "all running tasks!")
             install_worker_int_again_handler(worker)
-            worker.logger.warn("celeryd: Warm shutdown (%s)" % (
-                process_name))
+            print("celeryd: Warm shutdown (%s)" % (process_name, ))
             worker.stop(in_sighandler=True)
         raise SystemExit()

@@ -298,8 +296,7 @@ def install_worker_int_again_handler(worker):
     def _stop(signum, frame):
         process_name = get_process_name()
         if not process_name or process_name == "MainProcess":
-            worker.logger.warn("celeryd: Cold shutdown (%s)" % (
-                process_name))
+            print("celeryd: Cold shutdown (%s)" % (process_name, ))
             worker.terminate(in_sighandler=True)
         raise SystemTerminate()

@@ -311,8 +308,7 @@ def install_worker_term_handler(worker):
     def _stop(signum, frame):
         process_name = get_process_name()
         if not process_name or process_name == "MainProcess":
-            worker.logger.warn("celeryd: Warm shutdown (%s)" % (
-                process_name))
+            print("celeryd: Warm shutdown (%s)" % (process_name, ))
             worker.stop(in_sighandler=True)
         raise SystemExit()

@@ -323,8 +319,7 @@ def install_worker_restart_handler(worker):

     def restart_worker_sig_handler(signum, frame):
         """Signal handler restarting the current python program."""
-        worker.logger.warn("Restarting celeryd (%s)" % (
-            " ".join(sys.argv)))
+        print("Restarting celeryd (%s)" % (" ".join(sys.argv), ))
         worker.stop(in_sighandler=True)
         os.execv(sys.executable, [sys.executable] + sys.argv)


+ 2 - 1
celery/backends/amqp.py

@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import socket
 import threading

+ 13 - 3
celery/backends/base.py

@@ -1,5 +1,6 @@
 """celery.backends.base"""
 """celery.backends.base"""
 import time
 import time
+import sys
 
 
 from datetime import timedelta
 from datetime import timedelta
 
 
@@ -8,6 +9,7 @@ from celery.exceptions import TimeoutError, TaskRevokedError
 from celery.utils import timeutils
 from celery.utils import timeutils
 from celery.utils.serialization import pickle, get_pickled_exception
 from celery.utils.serialization import pickle, get_pickled_exception
 from celery.utils.serialization import get_pickleable_exception
 from celery.utils.serialization import get_pickleable_exception
+from celery.utils.serialization import create_exception_cls
 from celery.datastructures import LocalCache
 from celery.datastructures import LocalCache
 
 
 
 
@@ -33,7 +35,7 @@ class BaseBackend(object):
         return value
         return value
 
 
     def encode_result(self, result, status):
     def encode_result(self, result, status):
-        if status in self.EXCEPTION_STATES:
+        if status in self.EXCEPTION_STATES and isinstance(result, Exception):
             return self.prepare_exception(result)
             return self.prepare_exception(result)
         else:
         else:
             return self.prepare_value(result)
             return self.prepare_value(result)
@@ -68,11 +70,19 @@ class BaseBackend(object):
 
 
     def prepare_exception(self, exc):
     def prepare_exception(self, exc):
         """Prepare exception for serialization."""
         """Prepare exception for serialization."""
-        return get_pickleable_exception(exc)
+        if (self.app.conf["CELERY_RESULT_SERIALIZER"] in ("pickle", "yaml")):
+            return get_pickleable_exception(exc)
+        return {
+            "exc_type": type(exc).__name__,
+            "exc_message": str(exc),
+        }
 
 
     def exception_to_python(self, exc):
     def exception_to_python(self, exc):
         """Convert serialized exception to Python exception."""
         """Convert serialized exception to Python exception."""
-        return get_pickled_exception(exc)
+        if (self.app.conf["CELERY_RESULT_SERIALIZER"] in ("pickle", "yaml")):
+            return get_pickled_exception(exc)
+        return create_exception_cls(exc["exc_type"].encode("utf-8"),
+                                    sys.modules[__name__])
 
 
     def prepare_value(self, result):
     def prepare_value(self, result):
         """Prepare value for storage."""
         """Prepare value for storage."""

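A short sketch of what the new code stores for a non-pickle result serializer (illustrative values, Python 2 syntax as in the module):

    # With CELERY_RESULT_SERIALIZER="json", the exception is stored as a
    # plain dict instead of a pickled object:
    try:
        raise KeyError("missing")
    except KeyError, exc:
        meta = {"exc_type": type(exc).__name__,
                "exc_message": str(exc)}
    # meta == {"exc_type": "KeyError", "exc_message": "'missing'"}
    # exception_to_python() later rebuilds an exception class from "exc_type".
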
+ 2 - 1
celery/bin/celerybeat.py

@@ -22,7 +22,8 @@
     `ERROR`, `CRITICAL`, or `FATAL`.

 """
-from __future__ import with_statement, absolute_import
+from __future__ import with_statement
+from __future__ import absolute_import

 from functools import partial


+ 12 - 2
celery/bin/celeryd_detach.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import os
 import sys
@@ -111,10 +112,19 @@ class detached_celeryd(object):
     def execute_from_commandline(self, argv=None):
         if argv is None:
             argv = sys.argv
+        config = []
+        seen_cargs = 0
+        for arg in argv:
+            if seen_cargs:
+                config.append(arg)
+            else:
+                if arg == "--":
+                    seen_cargs = 1
+                    config.append(arg)
         prog_name = os.path.basename(argv[0])
         options, values, leftovers = self.parse_options(prog_name, argv[1:])
         detach(path=self.execv_path,
-               argv=self.execv_argv + leftovers,
+               argv=self.execv_argv + leftovers + config,
                **vars(options))



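A sketch of what the new loop collects (argv values are hypothetical):

    argv = ["celeryd", "--detach", "--", "broker.host=example.com"]
    # Everything from the first "--" onwards ends up in `config`:
    #   config == ["--", "broker.host=example.com"]
    # and is re-appended, so the detached child also receives the trailing
    # configuration arguments instead of silently dropping them.
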
+ 7 - 3
celery/bin/celeryd_multi.py

@@ -131,7 +131,8 @@ def main():
 class MultiTool(object):
     retcode = 0  # Final exit code.

-    def __init__(self):
+    def __init__(self, env=None):
+        self.env = env
         self.commands = {"start": self.start,
                          "show": self.show,
                          "stop": self.stop,
@@ -348,8 +349,9 @@ class MultiTool(object):
         self.note(c.cyan("celeryd-multi v%s" % __version__))

     def waitexec(self, argv, path=sys.executable):
-        argstr = shlex.split(" ".join([path] + list(argv)))
-        pipe = Popen(argstr)
+        args = " ".join([path] + list(argv))
+        argstr = shlex.split(args.encode("utf-8"))
+        pipe = Popen(argstr, env=self.env)
         self.info("  %s" % " ".join(argstr))
         retcode = pipe.wait()
         if retcode < 0:
@@ -394,6 +396,8 @@ def multi_args(p, cmd="celeryd", append="", prefix="", suffix=""):
                    options.pop("-n", socket.gethostname()))
     prefix = options.pop("--prefix", prefix) or ""
     suffix = options.pop("--suffix", suffix) or "." + hostname
+    if suffix in ('""', "''"):
+        suffix = ""

     for ns_name, ns_opts in p.namespaces.items():
         if "," in ns_name or (ranges and "-" in ns_name):

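A sketch of the suffix fix (node names are hypothetical):

    # `celeryd-multi start 2 --suffix=""` survives shell quoting as the
    # literal two-character string '""', which previously ended up inside
    # the node names. The new check disables the suffix entirely:
    suffix = '""'
    if suffix in ('""', "''"):
        suffix = ""    # e.g. celery1, celery2 instead of celery1."" ...
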
+ 2 - 1
celery/bin/celeryev.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import sys


+ 1 - 1
celery/db/session.py

@@ -20,7 +20,7 @@ def get_engine(dburi, **kwargs):
 def create_session(dburi, **kwargs):
     engine = get_engine(dburi, **kwargs)
     if dburi not in _MAKERS:
-      _MAKERS[dburi] = sessionmaker(bind=engine)
+        _MAKERS[dburi] = sessionmaker(bind=engine)
     return engine, _MAKERS[dburi]



+ 2 - 1
celery/events/__init__.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import time
 import socket

+ 2 - 1
celery/events/state.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import time
 import heapq

+ 2 - 1
celery/result.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import time


+ 2 - 2
celery/task/__init__.py

@@ -18,7 +18,7 @@ def task(*args, **kwargs):

     .. code-block:: python

-        @task()
+        @task
         def refresh_feed(url):
             return Feed.objects.get(url=url).refresh()

@@ -51,7 +51,7 @@ def periodic_task(*args, **options):

             .. code-block:: python

-                @task()
+                @task
                 def refresh_feed(url):
                     return Feed.objects.get(url=url).refresh()


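Both decorator forms remain valid; the docs now show the shorter one. A sketch (the options shown are illustrative, `Feed` is the model from the docs example):

    from celery.task import task

    @task                       # bare form, using the default options
    def refresh_feed(url):
        return Feed.objects.get(url=url).refresh()

    @task(ignore_result=True)   # called form, when passing options
    def ping():
        return "pong"
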
+ 6 - 1
celery/task/control.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 from kombu.pidbox import Mailbox

@@ -207,6 +208,10 @@ class Control(object):

         """
         with self.app.default_connection(connection, connect_timeout) as conn:
+            if channel is None:
+                if not getattr(conn, "_publisher_chan", None):
+                    conn._publisher_chan = conn.channel()
+                channel = conn._publisher_chan
             return self.mailbox(conn)._broadcast(command, arguments,
                                                  destination, reply, timeout,
                                                  limit, callback,

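For context, a sketch of a caller that benefits from the cached channel (assumes a running worker):

    from celery.task.control import broadcast

    # Repeated broadcasts over the same connection now reuse one cached
    # channel (conn._publisher_chan) instead of opening a new one each time.
    replies = broadcast("ping", reply=True, timeout=1)
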
+ 2 - 1
celery/task/sets.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import warnings


+ 1 - 1
celery/tests/test_app/test_app_amqp.py

@@ -90,7 +90,7 @@ class test_PublisherPool(AppCase):

             p1 = r1 = pool.acquire()
             p2 = r2 = pool.acquire()
-            delattr(r1.connection, "_publisher_chan")
+            delattr(r1.connection, "_producer_chan")
             r1.release()
             r2.release()
             r1 = pool.acquire()

+ 2 - 1
celery/tests/test_app/test_app_defaults.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import sys


+ 2 - 1
celery/tests/test_backends/test_redis.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import sys
 import socket

+ 5 - 3
celery/tests/test_worker/test_worker.py

@@ -86,11 +86,13 @@ def foo_periodic_task():
     return "foo"
     return "foo"
 
 
 
 
-def create_message(backend, **data):
+def create_message(channel, **data):
     data.setdefault("id", gen_unique_id())
     data.setdefault("id", gen_unique_id())
-    return Message(backend, body=pickle.dumps(dict(**data)),
+    channel.no_ack_consumers = set()
+    return Message(channel, body=pickle.dumps(dict(**data)),
                    content_type="application/x-python-serialize",
                    content_type="application/x-python-serialize",
-                   content_encoding="binary")
+                   content_encoding="binary",
+                   delivery_info={"consumer_tag": "mock"})
 
 
 
 
 class test_QoS(unittest.TestCase):
 class test_QoS(unittest.TestCase):

+ 8 - 0
celery/tests/test_worker/test_worker_control.py

@@ -29,6 +29,10 @@ def mytask():
     pass


+class WorkController(object):
+    autoscaler = None
+
+
 class Consumer(object):

     def __init__(self):
@@ -40,6 +44,7 @@ class Consumer(object):
         self.eta_schedule = Timer()
         self.app = current_app
         self.event_dispatcher = Mock()
+        self.controller = WorkController()

         from celery.concurrency.base import BasePool
         self.pool = BasePool(10)
@@ -192,6 +197,9 @@ class test_ControlPanel(unittest.TestCase):
             def cancel_by_queue(self, queue):
                 self.cancelled.append(queue)

+            def consuming_from(self, queue):
+                return queue in self.queues
+
         consumer = Consumer()
         consumer.task_consumer = MockConsumer()
         panel = self.create_panel(consumer=consumer)

+ 0 - 1
celery/tests/utils.py

@@ -54,7 +54,6 @@ def skip_unless_module(module):
     return _inner


-
 class AppCase(unittest.TestCase):

     def setUp(self):

+ 2 - 1
celery/utils/__init__.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import os
 import sys

+ 2 - 1
celery/utils/timer2.py

@@ -1,5 +1,6 @@
 """timer2 - Scheduler for Python functions."""
 """timer2 - Scheduler for Python functions."""
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement
 
 
 import atexit
 import atexit
 import heapq
 import heapq

+ 3 - 1
celery/worker/__init__.py

@@ -190,6 +190,7 @@ class WorkController(object):
             # needs a custom implementation.
             self.eta_scheduler_cls = self.pool.Timer

+        self.autoscaler = None
         if autoscale:
             self.autoscaler = instantiate(self.autoscaler_cls, self.pool,
                                           max_concurrency=max_concurrency,
@@ -226,7 +227,8 @@ class WorkController(object):
                                     initial_prefetch_count=prefetch_count,
                                     pool=self.pool,
                                     priority_timer=self.priority_timer,
-                                    app=self.app)
+                                    app=self.app,
+                                    controller=self)

         # The order is important here;
         #   the first in the list is the first to start,

+ 63 - 18
celery/worker/autoscale.py

@@ -1,3 +1,6 @@
+from __future__ import absolute_import
+from __future__ import with_statement
+
 import os
 import sys
 import threading
@@ -14,6 +17,7 @@ class Autoscaler(threading.Thread):
             keepalive=30, logger=None):
         threading.Thread.__init__(self)
         self.pool = pool
+        self.mutex = threading.Lock()
         self.max_concurrency = max_concurrency
         self.min_concurrency = min_concurrency
         self.keepalive = keepalive
@@ -27,38 +31,73 @@ class Autoscaler(threading.Thread):
         assert self.keepalive, "can't scale down too fast."

     def scale(self):
-        current = min(self.qty, self.max_concurrency)
-        if current > self.processes:
-            self.scale_up(current - self.processes)
-        elif current < self.processes:
-            self.scale_down((self.processes - current) - self.min_concurrency)
-        sleep(1.0)
+        with self.mutex:
+            current = min(self.qty, self.max_concurrency)
+            if current > self.processes:
+                self.scale_up(current - self.processes)
+            elif current < self.processes:
+                self.scale_down(
+                    (self.processes - current) - self.min_concurrency)
+
+    def update(self, max=None, min=None):
+        with self.mutex:
+            if max is not None:
+                if max < self.max_concurrency:
+                    self._shrink(self.processes - max)
+                self.max_concurrency = max
+            if min is not None:
+                if min > self.min_concurrency:
+                    self._grow(min - self.min_concurrency)
+                self.min_concurrency = min
+            return self.max_concurrency, self.min_concurrency
+
+    def force_scale_up(self, n):
+        with self.mutex:
+            new = self.processes + n
+            if new > self.max_concurrency:
+                self.max_concurrency = new
+            self.min_concurrency += 1
+            self._grow(n)
+
+    def force_scale_down(self, n):
+        with self.mutex:
+            new = self.processes - n
+            if new < self.min_concurrency:
+                self.min_concurrency = new
+            self._shrink(n)

     def scale_up(self, n):
-        self.logger.info("Scaling up %s processes." % (n, ))
         self._last_action = time()
-        return self.pool.grow(n)
+        return self._grow(n)
+
+    def _grow(self, n):
+        self.logger.info("Scaling up %s processes." % (n, ))
+        self.pool.grow(n)
+
+    def _shrink(self, n):
+        self.logger.info("Scaling down %s processes." % (n, ))
+        try:
+            self.pool.shrink(n)
+        except ValueError:
+            self.logger.debug(
+                "Autoscaler won't scale down: all processes busy.")
+        except Exception, exc:
+            self.logger.error("Autoscaler: scale_down: %r\n%r" % (
+                                exc, traceback.format_stack()),
+                                exc_info=sys.exc_info())

     def scale_down(self, n):
         if not self._last_action or not n:
             return
         if time() - self._last_action > self.keepalive:
-            self.logger.info("Scaling down %s processes." % (n, ))
             self._last_action = time()
-            try:
-                self.pool.shrink(n)
-            except ValueError:
-                self.logger.debug(
-                    "Autoscaler won't scale down: all processes busy.")
-            except Exception, exc:
-                self.logger.error("Autoscaler: scale_down: %r\n%r" % (
-                                    exc, traceback.format_stack()),
-                                  exc_info=sys.exc_info())
+            self._shrink(n)

     def run(self):
         while not self._shutdown.isSet():
             try:
                 self.scale()
+                sleep(1.0)
             except Exception, exc:
                 self.logger.error("Thread Autoscaler crashed: %r" % (exc, ),
                                   exc_info=sys.exc_info())
@@ -71,6 +110,12 @@ class Autoscaler(threading.Thread):
         if self.isAlive():
             self.join(1e10)

+    def info(self):
+        return {"max": self.max_concurrency,
+                "min": self.min_concurrency,
+                "current": self.processes,
+                "qty": self.qty}
+
     @property
     def qty(self):
         return len(state.reserved_requests)

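A sketch of the new thread-safe API (`pool` and `logger` are assumed to exist; the numbers are illustrative):

    scaler = Autoscaler(pool, max_concurrency=10, min_concurrency=3,
                        logger=logger)
    scaler.update(max=8, min=2)     # clamp the range under the mutex
    scaler.force_scale_up(2)        # grow now, raising max_concurrency if needed
    scaler.force_scale_down(1)      # shrink now, lowering min_concurrency if needed
    scaler.info()                   # {"max": ..., "min": ..., "current": ..., "qty": ...}
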
+ 2 - 1
celery/worker/buckets.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 import threading


+ 5 - 3
celery/worker/consumer.py

@@ -1,4 +1,5 @@
-from __future__ import absolute_import, with_statement
+from __future__ import absolute_import
+from __future__ import with_statement

 """

@@ -252,10 +253,11 @@ class Consumer(object):
     def __init__(self, ready_queue, eta_schedule, logger,
             init_callback=noop, send_events=False, hostname=None,
             initial_prefetch_count=2, pool=None, app=None,
-            priority_timer=None):
+            priority_timer=None, controller=None):
         self.app = app_or_default(app)
         self.connection = None
         self.task_consumer = None
+        self.controller = controller
         self.broadcast_consumer = None
         self.ready_queue = ready_queue
         self.eta_schedule = eta_schedule
@@ -297,7 +299,7 @@ class Consumer(object):
                 self.consume_messages()
             except self.connection_errors:
                 self.logger.error("Consumer: Connection to broker lost."
-                                + " Trying to re-establish connection...",
+                                + " Trying to re-establish the connection...",
                                 exc_info=sys.exc_info())

     def consume_messages(self):

+ 34 - 12
celery/worker/control/builtins.py

@@ -160,9 +160,13 @@ def dump_active(panel, safe=False, **kwargs):

 @Panel.register
 def stats(panel, **kwargs):
+    asinfo = {}
+    if panel.consumer.controller.autoscaler:
+        asinfo = panel.consumer.controller.autoscaler.info()
     return {"total": state.total_count,
             "consumer": panel.consumer.info,
-            "pool": panel.consumer.pool.info}
+            "pool": panel.consumer.pool.info,
+            "autoscaler": asinfo}


 @Panel.register
@@ -197,16 +201,31 @@ def ping(panel, **kwargs):

 @Panel.register
 def pool_grow(panel, n=1, **kwargs):
-    panel.consumer.pool.grow(n)
+    if panel.consumer.controller.autoscaler:
+        panel.consumer.controller.autoscaler.force_scale_up(n)
+    else:
+        panel.consumer.pool.grow(n)
     return {"ok": "spawned worker processes"}


 @Panel.register
 def pool_shrink(panel, n=1, **kwargs):
-    panel.consumer.pool.shrink(n)
+    if panel.consumer.controller.autoscaler:
+        panel.consumer.controller.autoscaler.force_scale_down(n)
+    else:
+        panel.consumer.pool.shrink(n)
     return {"ok": "terminated worker processes"}


+@Panel.register
+def autoscale(panel, max=None, min=None):
+    autoscaler = panel.consumer.controller.autoscaler
+    if autoscaler:
+        max_, min_ = autoscaler.update(max, min)
+        return {"ok": "autoscale now min=%r max=%r" % (max_, min_)}
+    raise ValueError("Autoscale not enabled")
+
+
 @Panel.register
 def shutdown(panel, **kwargs):
     panel.logger.warning("Got shutdown from remote.")
@@ -217,15 +236,18 @@ def shutdown(panel, **kwargs):
 def add_consumer(panel, queue=None, exchange=None, exchange_type="direct",
         routing_key=None, **options):
     cset = panel.consumer.task_consumer
-    declaration = dict(queue=queue,
-                       exchange=exchange,
-                       exchange_type=exchange_type,
-                       routing_key=routing_key,
-                       **options)
-    cset.add_consumer_from_dict(**declaration)
-    cset.consume()
-    panel.logger.info("Started consuming from %r" % (declaration, ))
-    return {"ok": "started consuming from %s" % (queue, )}
+    if not cset.consuming_from(queue):
+        declaration = dict(queue=queue,
+                           exchange=exchange,
+                           exchange_type=exchange_type,
+                           routing_key=routing_key,
+                           **options)
+        cset.add_consumer_from_dict(**declaration)
+        cset.consume()
+        panel.logger.info("Started consuming from %r" % (declaration, ))
+        return {"ok": "started consuming from %s" % (queue, )}
+    else:
+        return {"ok": "already consuming from %s" % (queue, )}


 @Panel.register

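The new command can be driven remotely; a sketch using the control API, assuming autoscaling is enabled on the worker:

    from celery.task.control import broadcast

    broadcast("autoscale", arguments={"max": 10, "min": 3}, reply=True)
    broadcast("pool_grow", arguments={"n": 2})    # force_scale_up when autoscaling
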
+ 2 - 0
contrib/debian/README.rst

@@ -0,0 +1,2 @@
+These init scripts have been deprecated,
+please use ../generic-init.d instead.

+ 0 - 0
contrib/debian/init.d/celerybeat → contrib/debian/init.d-deprecated/celerybeat


+ 0 - 0
contrib/debian/init.d/celeryd → contrib/debian/init.d-deprecated/celeryd


+ 0 - 0
contrib/debian/init.d/celeryevcam → contrib/debian/init.d-deprecated/celeryevcam


+ 206 - 0
contrib/generic-init.d/celerybeat

@@ -0,0 +1,206 @@
+#!/bin/bash -e
+# =========================================================
+#  celerybeat - Starts the Celery periodic task scheduler.
+# =========================================================
+#
+# :Usage: /etc/init.d/celerybeat {start|stop|force-reload|restart|try-restart|status}
+#
+# :Configuration file: /etc/default/celerybeat or /etc/default/celeryd
+#
+# EXAMPLE CONFIGURATION
+# =====================
+#
+# this is an example configuration for a Python project:
+#
+# /etc/default/celeryd:
+#
+#   # Where to chdir at start.
+#   CELERYD_CHDIR="/opt/Myproject/"
+#
+#   # Extra arguments to celeryd
+#   CELERYD_OPTS="--time-limit=300"
+#
+#   # Extra arguments to celerybeat
+#   CELERYBEAT_OPTS="--schedule=/var/run/celerybeat-schedule"
+#
+#   # Name of the celery config module.#
+#   CELERY_CONFIG_MODULE="celeryconfig"
+#
+# EXAMPLE DJANGO CONFIGURATION
+# ============================
+#
+#   # Where the Django project is.
+#   CELERYD_CHDIR="/opt/Project/"
+#
+#   # Name of the projects settings module.
+#   export DJANGO_SETTINGS_MODULE="settings"
+#
+#   # Path to celeryd
+#   CELERYD="/opt/Project/manage.py celeryd"
+#
+#   # Path to celerybeat
+#   CELERYBEAT="/opt/Project/manage.py"
+#
+#   # Extra arguments to celerybeat
+#   CELERYBEAT_OPTS="celerybeat --schedule=/var/run/celerybeat-schedule"
+#
+# AVAILABLE OPTIONS
+# =================
+#
+#   * CELERYBEAT_OPTS
+#       Additional arguments to celerybeat, see `celerybeat --help` for a
+#       list.
+#
+#   * CELERYBEAT_PID_FILE
+#       Full path to the pidfile. Default is /var/run/celerybeat.pid.
+#
+#   * CELERYBEAT_LOG_FILE
+#       Full path to the celerybeat logfile. Default is /var/log/celerybeat.log
+#
+#   * CELERYBEAT_LOG_LEVEL
+#       Log level to use for celerybeat. Default is INFO.
+#
+#   * CELERYBEAT
+#       Path to the celerybeat program. Default is `celerybeat`.
+#       You can point this to a virtualenv, or even use manage.py for django.
+#
+#   * CELERYBEAT_USER
+#       User to run celerybeat as. Default is current user.
+#
+#   * CELERYBEAT_GROUP
+#       Group to run celerybeat as. Default is current user.
+#
+#   * VIRTUALENV
+#       Full path to the virtualenv environment to activate. Default is none.
+
+### BEGIN INIT INFO
+# Provides:          celerybeat
+# Required-Start:    $network $local_fs $remote_fs
+# Required-Stop:     $network $local_fs $remote_fs
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: celery periodic task scheduler
+### END INIT INFO
+
+set -e
+
+DEFAULT_PID_FILE="/var/run/celerybeat.pid"
+DEFAULT_LOG_FILE="/var/log/celerybeat.log"
+DEFAULT_LOG_LEVEL="INFO"
+DEFAULT_CELERYBEAT="celerybeat"
+
+# /etc/init.d/celerybeat: start and stop the celery periodic task scheduler.
+
+if test -f /etc/default/celeryd; then
+    . /etc/default/celeryd
+fi
+
+if test -f /etc/default/celerybeat; then
+    . /etc/default/celerybeat
+fi
+
+CELERYBEAT=${CELERYBEAT:-$DEFAULT_CELERYBEAT}
+CELERYBEAT_PID_FILE=${CELERYBEAT_PID_FILE:-${CELERYBEAT_PIDFILE:-$DEFAULT_PID_FILE}}
+CELERYBEAT_LOG_FILE=${CELERYBEAT_LOG_FILE:-${CELERYBEAT_LOGFILE:-$DEFAULT_LOG_FILE}}
+CELERYBEAT_LOG_LEVEL=${CELERYBEAT_LOG_LEVEL:-${CELERYBEAT_LOGLEVEL:-$DEFAULT_LOG_LEVEL}}
+
+export CELERY_LOADER
+
+CELERYBEAT_OPTS="$CELERYBEAT_OPTS -f $CELERYBEAT_LOG_FILE -l $CELERYBEAT_LOG_LEVEL"
+
+if [ -n "$2" ]; then
+    CELERYBEAT_OPTS="$CELERYBEAT_OPTS $2"
+fi
+
+# Extra start-stop-daemon options, like user/group.
+if [ -n "$CELERYBEAT_USER" ]; then
+    DAEMON_OPTS="$DAEMON_OPTS --uid $CELERYBEAT_USER"
+fi
+if [ -n "$CELERYBEAT_GROUP" ]; then
+    DAEMON_OPTS="$DAEMON_OPTS --gid $CELERYBEAT_GROUP"
+fi
+
+CELERYBEAT_CHDIR=${CELERYBEAT_CHDIR:-$CELERYD_CHDIR}
+if [ -n "$CELERYBEAT_CHDIR" ]; then
+    DAEMON_OPTS="$DAEMON_OPTS --workdir $CELERYBEAT_CHDIR"
+fi
+
+
+check_dev_null() {
+    if [ ! -c /dev/null ]; then
+        echo "/dev/null is not a character device!"
+        exit 1
+    fi
+}
+
+
+export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
+
+
+wait_pid () {
+    pid=$1
+    forever=1
+    i=0
+    while [ $forever -gt 0 ]; do
+        kill -0 $pid 1>/dev/null 2>&1
+        if [ ! $? ]; then
+            echo "OK"
+            forever=0
+        else
+            kill -TERM "$pid"
+            i=$((i + 1))
+            if [ $i -gt 60 ]; then
+                echo "ERROR"
+                echo "Timed out while stopping (30s)"
+                forever=0
+            else
+                sleep 0.5
+            fi
+        fi
+    done
+}
+
+
+stop_beat () {
+    echo -n "Stopping celerybeat... "
+    if [ -f "$CELERYBEAT_PID_FILE" ]; then
+        wait_pid $(cat "$CELERYBEAT_PID_FILE")
+    else
+        echo "NOT RUNNING"
+    fi
+}
+
+start_beat () {
+    echo "Starting celerybeat..."
+    if [ -n "$VIRTUALENV" ]; then
+        source $VIRTUALENV/bin/activate
+    fi
+    $CELERYBEAT $CELERYBEAT_OPTS $DAEMON_OPTS --detach \
+                --pidfile="$CELERYBEAT_PID_FILE"
+}
+
+
+
+case "$1" in
+  start)
+    check_dev_null
+    start_beat
+    ;;
+  stop)
+    stop_beat
+    ;;
+  reload|force-reload)
+    echo "Use start+stop"
+    ;;
+  restart)
+    echo "Restarting celery periodic task scheduler"
+    stop_beat
+    start_beat
+    ;;
+
+  *)
+    echo "Usage: /etc/init.d/celerybeat {start|stop|restart}"
+    exit 1
+esac
+
+exit 0

+ 217 - 0
contrib/generic-init.d/celeryevcam

@@ -0,0 +1,217 @@
+#!/bin/bash -e
+# =========================================================
+#  celeryevcam - Starts the Celery event snapshot camera.
+# =========================================================
+#
+# :Usage: /etc/init.d/celeryev {start|stop|force-reload|restart|try-restart|status}
+#
+# :Configuration file: /etc/default/celeryev | /etc/default/celeryd
+#
+# To configure celeryev you probably need to tell it where to chdir.
+#
+# EXAMPLE CONFIGURATION
+# =====================
+#
+# this is an example configuration for a Python project:
+#
+# /etc/default/celeryd:
+#
+#   # Where to chdir at start.
+#   CELERYD_CHDIR="/opt/Myproject/"
+#
+#   # Extra arguments to celeryev
+#   CELERYEV_OPTS="-x"
+#
+#   # Name of the celery config module.#
+#   CELERY_CONFIG_MODULE="celeryconfig"
+#
+#   # Camera class to use (required)
+#   CELERYEV_CAM="myapp.Camera"
+#
+# EXAMPLE DJANGO CONFIGURATION
+# ============================
+#
+#   # Where the Django project is.
+#   CELERYD_CHDIR="/opt/Project/"
+#
+#   # Name of the projects settings module.
+#   export DJANGO_SETTINGS_MODULE="settings"
+#
+#   # Path to celeryd
+#   CELERYEV="/opt/Project/manage.py"
+#
+#   # Extra arguments to manage.py
+#   CELERYEV_OPTS="celeryev"
+#
+#   # Camera class to use (required)
+#   CELERYEV_CAM="djcelery.snapshot.Camera"
+#
+# AVAILABLE OPTIONS
+# =================
+#
+#   * CELERYEV_OPTS
+#       Additional arguments to celeryev, see `celeryev --help` for a list.
+#
+#   * CELERYD_CHDIR
+#       Path to chdir at start. Default is to stay in the current directory.
+#
+#   * CELERYEV_PID_FILE
+#       Full path to the pidfile. Default is /var/run/celeryev.pid.
+#
+#   * CELERYEV_LOG_FILE
+#       Full path to the celeryev logfile. Default is /var/log/celeryev.log
+#
+#   * CELERYEV_LOG_LEVEL
+#       Log level to use for celeryev. Default is INFO.
+#
+#   * CELERYEV
+#       Path to the celeryev program. Default is `celeryev`.
+#       You can point this to a virtualenv, or even use manage.py for django.
+#
+#   * CELERYEV_USER
+#       User to run celeryev as. Default is current user.
+#
+#   * CELERYEV_GROUP
+#       Group to run celeryev as. Default is current user.
+#
+#   * VIRTUALENV
+#       Full path to the virtualenv environment to activate. Default is none.
+
+### BEGIN INIT INFO
+# Provides:          celeryev
+# Required-Start:    $network $local_fs $remote_fs
+# Required-Stop:     $network $local_fs $remote_fs
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: celery event snapshots
+### END INIT INFO
+
+set -e
+
+DEFAULT_PID_FILE="/var/run/celeryev.pid"
+DEFAULT_LOG_FILE="/var/log/celeryev.log"
+DEFAULT_LOG_LEVEL="INFO"
+DEFAULT_CELERYEV="/usr/bin/celeryev"
+
+if test -f /etc/default/celeryd; then
+    . /etc/default/celeryd
+fi
+
+if test -f /etc/default/celeryev; then
+    . /etc/default/celeryev
+fi
+
+CELERYEV=${CELERYEV:-$DEFAULT_CELERYEV}
+CELERYEV_PID_FILE=${CELERYEV_PID_FILE:-${CELERYEV_PIDFILE:-$DEFAULT_PID_FILE}}
+CELERYEV_LOG_FILE=${CELERYEV_LOG_FILE:-${CELERYEV_LOGFILE:-$DEFAULT_LOG_FILE}}
+CELERYEV_LOG_LEVEL=${CELERYEV_LOG_LEVEL:-${CELERYEV_LOGLEVEL:-$DEFAULT_LOG_LEVEL}}
+
+export CELERY_LOADER
+
+CELERYEV_OPTS="$CELERYEV_OPTS -f $CELERYEV_LOG_FILE -l $CELERYEV_LOG_LEVEL"
+
+if [ -z "$CELERYEV_CAM" ]; then
+    echo "Missing CELERYEV_CAM variable" 1>&2
+    exit 1
+fi
+
+CELERYEV_OPTS="$CELERYEV_OPTS -c $CELERYEV_CAM"
+
+if [ -n "$2" ]; then
+    CELERYEV_OPTS="$CELERYEV_OPTS $2"
+fi
+
+# Extra start-stop-daemon options, like user/group.
+if [ -n "$CELERYEV_USER" ]; then
+    DAEMON_OPTS="$DAEMON_OPTS --uid $CELERYEV_USER"
+fi
+if [ -n "$CELERYEV_GROUP" ]; then
+    DAEMON_OPTS="$DAEMON_OPTS --gid $CELERYEV_GROUP"
+fi
+
+if [ -n "$CELERYEV_CHDIR" ]; then
+    DAEMON_OPTS="$DAEMON_OPTS --workdir $CELERYEV_CHDIR"
+elif [ -n "$CELERYD_CHDIR" ]; then
+    DAEMON_OPTS="$DAEMON_OPTS --workdir $CELERYD_CHDIR"
+fi
+
+
+export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
+
+check_dev_null() {
+    if [ ! -c /dev/null ]; then
+        echo "/dev/null is not a character device!"
+        exit 1
+    fi
+}
+
+wait_pid () {
+    pid=$1
+    forever=1
+    i=0
+    while [ $forever -gt 0 ]; do
+        kill -0 $pid 1>/dev/null 2>&1
+        if [ ! $? ]; then
+            echo "OK"
+            forever=0
+        else
+            kill -TERM "$pid"
+            i=$((i + 1))
+            if [ $i -gt 60 ]; then
+                echo "ERROR"
+                echo "Timed out while stopping (30s)"
+                forever=0
+            else
+                sleep 0.5
+            fi
+        fi
+    done
+}
+
+
+
+stop_evcam () {
+    echo -n "Stopping celeryev..."
+    if [ -f "$CELERYEV_PID_FILE" ]; then
+        wait_pid $(cat "$CELERYEV_PID_FILE")
+    else
+        echo "NOT RUNNING"
+    fi
+}
+
+start_evcam () {
+    echo "Starting celeryev..."
+    if [ -n "$VIRTUALENV" ]; then
+        source $VIRTUALENV/bin/activate
+    fi
+    $CELERYEV $CELERYEV_OPTS $DAEMON_OPTS --detach \
+              --pidfile="$CELERYEV_PID_FILE"
+}
+
+
+
+case "$1" in
+  start)
+    check_dev_null
+    start_evcam
+    ;;
+  stop)
+    stop_evcam
+    ;;
+
+  reload|force-reload)
+    echo "Use start+stop"
+    ;;
+  restart)
+    echo "Restarting celery event snapshots"
+    stop_evcam
+    check_dev_null
+    start_evcam
+    ;;
+
+  *)
+    echo "Usage: /etc/init.d/celeryev {start|stop|restart}"
+    exit 1
+esac
+
+exit 0

+ 6 - 93
docs/cookbook/daemonizing.rst

@@ -136,101 +136,14 @@ Available options
 * CELERYD_GROUP
     Group to run celeryd as. Default is current user.

-start-stop-daemon (Debian/Ubuntu/++)
-====================================
-
-See the `contrib/debian/init.d/`_ directory in the Celery distribution, this
-directory contains init scripts for celeryd and celerybeat.
-
-These scripts are configured in :file:`/etc/default/celeryd`.
-
-.. _`contrib/debian/init.d/`:
-    http://github.com/ask/celery/tree/master/contrib/debian/
-
-.. _debian-initd-celeryd:
-
-Init script: celeryd
---------------------
-
-:Usage: `/etc/init.d/celeryd {start|stop|force-reload|restart|try-restart|status}`
-:Configuration file: /etc/default/celeryd
-
-To configure celeryd you probably need to at least tell it where to change
-directory to when it starts (to find your `celeryconfig`).
-
-.. _debian-initd-celeryd-example:
-
-Example configuration
-~~~~~~~~~~~~~~~~~~~~~
-
-This is an example configuration for a Python project.
-
-:file:`/etc/default/celeryd`:
-
-    # Where to chdir at start.
-    CELERYD_CHDIR="/opt/Myproject/"
-
-    # Extra arguments to celeryd
-    CELERYD_OPTS="--time-limit=300"
-
-    # Name of the celery config module.#
-    CELERY_CONFIG_MODULE="celeryconfig"
-
-.. _debian-initd-celeryd-django-example:
-
-Example Django configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This is an example configuration for those using `django-celery`::
-
-    # Where the Django project is.
-    CELERYD_CHDIR="/opt/Project/"
-
-    # Path to celeryd
-    CELERYD="/opt/Project/manage.py celeryd"
-
-    # Name of the projects settings module.
-    export DJANGO_SETTINGS_MODULE="settings"
-
-.. _debian-initd-celeryd-options:
-
-Available options
-~~~~~~~~~~~~~~~~~~
-
-* CELERYD_OPTS
-    Additional arguments to celeryd, see `celeryd --help` for a list.
-
-* CELERYD_CHDIR
-    Path to change directory to at start. Default is to stay in the current
-    directory.
-
-* CELERYD_PID_FILE
-    Full path to the PID file. Default is /var/run/celeryd.pid.
-
-* CELERYD_LOG_FILE
-    Full path to the celeryd log file. Default is /var/log/celeryd.log
-
-* CELERYD_LOG_LEVEL
-    Log level to use for celeryd. Default is INFO.
-
-* CELERYD
-    Path to the celeryd program. Default is `celeryd`.
-    You can point this to an virtualenv, or even use manage.py for django.
-
-* CELERYD_USER
-    User to run celeryd as. Default is current user.
-
-* CELERYD_GROUP
-    Group to run celeryd as. Default is current user.
-
-.. _debian-initd-celerybeat:
+.. _generic-initd-celerybeat:

 Init script: celerybeat
 -----------------------
-:Usage: `/etc/init.d/celerybeat {start|stop|force-reload|restart|try-restart|status}`
+:Usage: `/etc/init.d/celerybeat {start|stop|restart}`
 :Configuration file: /etc/default/celerybeat or /etc/default/celeryd

-.. _debian-initd-celerybeat-example:
+.. _generic-initd-celerybeat-example:

 Example configuration
 ~~~~~~~~~~~~~~~~~~~~~
@@ -251,7 +164,7 @@ This is an example configuration for a Python project:
     # Name of the celery config module.#
     CELERY_CONFIG_MODULE="celeryconfig"

-.. _debian-initd-celerybeat-django-example:
+.. _generic-initd-celerybeat-django-example:

 Example Django configuration
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -273,7 +186,7 @@ This is an example configuration for those using `django-celery`::

     # Extra arguments to celerybeat
     CELERYBEAT_OPTS="--schedule=/var/run/celerybeat-schedule"

-.. _debian-initd-celerybeat-options:
+.. _generic-initd-celerybeat-options:

 Available options
 ~~~~~~~~~~~~~~~~~
@@ -301,7 +214,7 @@ Available options
 * CELERYBEAT_GROUP
     Group to run celeryd as. Default is current user.

-.. _debian-initd-troubleshooting:
+.. _generic-initd-troubleshooting:

 Troubleshooting
 ---------------

+ 2 - 2
docs/internals/protocol.rst

@@ -1,7 +1,7 @@
 .. _internals-task-message-protocol:

 =======================
- Task Message Protocol
+ Task Messages
 =======================

 .. contents::
@@ -90,7 +90,7 @@ format:
 Serialization
 =============

-The protocol supports several serialization formats using the
+Several types of serialization formats are supported using the
 `content_type` message header.

 The MIME-types supported by default are shown in the following table.

+ 4 - 0
docs/userguide/tasks.rst

@@ -25,6 +25,8 @@ Given a function create_user`, that takes two arguments: `username` and

     from django.contrib.auth import User

+    from celery.task import task
+
     @task
     def create_user(username, password):
         User.objects.create(username=username, password=password)
@@ -79,6 +81,8 @@ Example Usage

 ::

+    from celery.task import task
+
     @task
     def add(x, y):
         print("Executing task id %r, args: %r kwargs: %r" % (

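With the import in place the examples are self-contained; a sketch of calling one (requires a running worker and a result backend):

    from celery.task import task

    @task
    def add(x, y):
        return x + y

    result = add.delay(2, 2)
    result.get()    # -> 4
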
+ 1 - 1
requirements/default.txt

@@ -1,4 +1,4 @@
 python-dateutil>=1.5.0,<2.0.0
 anyjson>=0.3.1
-kombu>=1.1.5,<2.0.0
+kombu>=1.2.1,<2.0.0
 pyparsing>=1.5.0,<2.0.0

+ 1 - 1
setup.cfg

@@ -44,5 +44,5 @@ requires = uuid
            multiprocessing == 2.6.2.1
            python-dateutil <= 1.5.0
            anyjson >= 0.3.1
-           kombu >= 1.1.5
+           kombu >= 1.2.1
            pyparsing >= 1.5.0

+ 1 - 1
setup.py

@@ -49,7 +49,7 @@ except ImportError:
 install_requires.extend([
     "python-dateutil>=1.5.0,<2.0.0",
     "anyjson>=0.3.1",
-    "kombu>=1.1.5,<2.0.0",
+    "kombu>=1.2.1,<2.0.0",
     "pyparsing>=1.5.0,<2.0.0",
 ])
 py_version = sys.version_info