
Use ' instead of "

Ask Solem committed 12 years ago
commit 566c4da7b4
100 files changed, 2137 additions and 2162 deletions
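
The change is purely mechanical: every double-quoted string literal becomes a single-quoted one, and the two forms are identical at runtime. A minimal illustration, reusing a line that appears in the celery/__compat__.py diff below:

    kwargs = {}
    kwargs.setdefault("accept_magic_kwargs", True)   # before: double quotes
    kwargs.setdefault('accept_magic_kwargs', True)   # after: single quotes
    # Both spellings denote the same string value.
    assert "accept_magic_kwargs" == 'accept_magic_kwargs'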
Files changed (additions / deletions):

1. LICENSE (+8 / -0)
2. celery/__compat__.py (+35 / -35)
3. celery/__init__.py (+17 / -23)
4. celery/app/__init__.py (+9 / -9)
5. celery/app/abstract.py (+5 / -5)
6. celery/app/amqp.py (+32 / -32)
7. celery/app/annotations.py (+3 / -3)
8. celery/app/base.py (+35 / -35)
9. celery/app/builtins.py (+28 / -28)
10. celery/app/control.py (+36 / -36)
11. celery/app/defaults.py (+149 / -149)
12. celery/app/log.py (+13 / -13)
13. celery/app/registry.py (+3 / -3)
14. celery/app/routes.py (+6 / -6)
15. celery/app/task.py (+64 / -64)
16. celery/app/utils.py (+25 / -25)
17. celery/apps/beat.py (+24 / -24)
18. celery/apps/worker.py (+69 / -69)
19. celery/backends/__init__.py (+8 / -8)
20. celery/backends/amqp.py (+31 / -31)
21. celery/backends/base.py (+49 / -49)
22. celery/backends/cache.py (+14 / -14)
23. celery/backends/cassandra.py (+26 / -26)
24. celery/backends/database/__init__.py (+6 / -6)
25. celery/backends/database/a805d4bd.py (+15 / -13)
26. celery/backends/database/dfd042c7.py (+2 / -2)
27. celery/backends/database/models.py (+16 / -16)
28. celery/backends/database/session.py (+2 / -2)
29. celery/backends/mongodb.py (+41 / -41)
30. celery/backends/redis.py (+10 / -10)
31. celery/beat.py (+38 / -38)
32. celery/bin/base.py (+32 / -32)
33. celery/bin/camqadm.py (+72 / -72)
34. celery/bin/celery.py (+178 / -192)
35. celery/bin/celerybeat.py (+8 / -8)
36. celery/bin/celeryctl.py (+1 / -1)
37. celery/bin/celeryd.py (+30 / -30)
38. celery/bin/celeryd_detach.py (+19 / -19)
39. celery/bin/celeryd_multi.py (+91 / -91)
40. celery/bin/celeryev.py (+15 / -15)
41. celery/canvas.py (+52 / -52)
42. celery/concurrency/__init__.py (+5 / -5)
43. celery/concurrency/base.py (+4 / -4)
44. celery/concurrency/eventlet.py (+1 / -1)
45. celery/concurrency/gevent.py (+1 / -1)
46. celery/concurrency/processes/__init__.py (+17 / -17)
47. celery/concurrency/solo.py (+5 / -5)
48. celery/concurrency/threads.py (+1 / -1)
49. celery/contrib/abortable.py (+4 / -4)
50. celery/contrib/batches.py (+8 / -8)
51. celery/contrib/bundles.py (+31 / -31)
52. celery/contrib/methods.py (+2 / -2)
53. celery/contrib/migrate.py (+7 / -7)
54. celery/contrib/rdb.py (+17 / -17)
55. celery/datastructures.py (+7 / -7)
56. celery/events/__init__.py (+16 / -16)
57. celery/events/cursesmon.py (+63 / -63)
58. celery/events/dumper.py (+25 / -25)
59. celery/events/snapshot.py (+6 / -6)
60. celery/events/state.py (+20 / -20)
61. celery/loaders/__init__.py (+7 / -7)
62. celery/loaders/base.py (+12 / -12)
63. celery/loaders/default.py (+10 / -10)
64. celery/local.py (+6 / -6)
65. celery/platforms.py (+57 / -57)
66. celery/result.py (+7 / -7)
67. celery/schedules.py (+36 / -36)
68. celery/security/__init__.py (+2 / -2)
69. celery/security/certificate.py (+6 / -6)
70. celery/security/key.py (+2 / -2)
71. celery/security/serialization.py (+15 / -15)
72. celery/security/utils.py (+1 / -1)
73. celery/signals.py (+16 / -26)
74. celery/states.py (+14 / -14)
75. celery/task/__init__.py (+6 / -6)
76. celery/task/base.py (+22 / -22)
77. celery/task/http.py (+23 / -23)
78. celery/task/sets.py (+1 / -1)
79. celery/task/trace.py (+10 / -10)
80. celery/utils/__init__.py (+18 / -18)
81. celery/utils/compat.py (+3 / -3)
82. celery/utils/debug.py (+5 / -5)
83. celery/utils/functional.py (+8 / -13)
84. celery/utils/imports.py (+8 / -8)
85. celery/utils/log.py (+14 / -14)
86. celery/utils/mail.py (+15 / -15)
87. celery/utils/serialization.py (+4 / -4)
88. celery/utils/term.py (+22 / -22)
89. celery/utils/text.py (+11 / -11)
90. celery/utils/threads.py (+9 / -9)
91. celery/utils/timer2.py (+19 / -19)
92. celery/utils/timeutils.py (+19 / -19)
93. celery/worker/__init__.py (+38 / -38)
94. celery/worker/abstract.py (+10 / -10)
95. celery/worker/autoreload.py (+16 / -16)
96. celery/worker/autoscale.py (+10 / -10)
97. celery/worker/buckets.py (+4 / -4)
98. celery/worker/consumer.py (+28 / -28)
99. celery/worker/control.py (+63 / -63)
100. celery/worker/heartbeat.py (+3 / -3)

+ 8 - 0
LICENSE

@@ -34,6 +34,14 @@ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.
 
+Documentation License
+=====================
+
+The documentation portion of Celery (the rendered contents of the
+"docs" directory of a software distribution or checkout) is supplied
+under the Creative Commons Attribution-Noncommercial-Share Alike 3.0
+United States License as described by
+http://creativecommons.org/licenses/by-nc-sa/3.0/us/
 
 Footnotes
 =========

+ 35 - 35
celery/__compat__.py

@@ -23,7 +23,7 @@ MODULE_DEPRECATED = """
 The module %s is deprecated and will be removed in a future version.
 """
 
-DEFAULT_ATTRS = set(["__file__", "__path__", "__doc__", "__all__"])
+DEFAULT_ATTRS = set(['__file__', '__path__', '__doc__', '__all__'])
 
 # im_func is no longer available in Py3.
 # instead the unbound method itself can be used.
@@ -37,61 +37,61 @@ else:
 
 def getappattr(path):
     """Gets attribute from the current_app recursively,
-    e.g. getappattr("amqp.get_task_consumer")``."""
+    e.g. getappattr('amqp.get_task_consumer')``."""
     from celery import current_app
     return current_app._rgetattr(path)
 
 
 def _compat_task_decorator(*args, **kwargs):
     from celery import current_app
-    kwargs.setdefault("accept_magic_kwargs", True)
+    kwargs.setdefault('accept_magic_kwargs', True)
     return current_app.task(*args, **kwargs)
 
 
 def _compat_periodic_task_decorator(*args, **kwargs):
     from celery.task import periodic_task
-    kwargs.setdefault("accept_magic_kwargs", True)
+    kwargs.setdefault('accept_magic_kwargs', True)
     return periodic_task(*args, **kwargs)
 
 
 COMPAT_MODULES = {
-    "celery": {
-        "execute": {
-            "send_task": "send_task",
+    'celery': {
+        'execute': {
+            'send_task': 'send_task',
         },
-        "decorators": {
-            "task": _compat_task_decorator,
-            "periodic_task": _compat_periodic_task_decorator,
+        'decorators': {
+            'task': _compat_task_decorator,
+            'periodic_task': _compat_periodic_task_decorator,
         },
-        "log": {
-            "get_default_logger": "log.get_default_logger",
-            "setup_logger": "log.setup_logger",
-            "setup_loggig_subsystem": "log.setup_logging_subsystem",
-            "redirect_stdouts_to_logger": "log.redirect_stdouts_to_logger",
+        'log': {
+            'get_default_logger': 'log.get_default_logger',
+            'setup_logger': 'log.setup_logger',
+            'setup_loggig_subsystem': 'log.setup_logging_subsystem',
+            'redirect_stdouts_to_logger': 'log.redirect_stdouts_to_logger',
         },
-        "messaging": {
-            "TaskPublisher": "amqp.TaskPublisher",
-            "TaskConsumer": "amqp.TaskConsumer",
-            "establish_connection": "broker_connection",
-            "with_connection": "with_default_connection",
-            "get_consumer_set": "amqp.TaskConsumer",
+        'messaging': {
+            'TaskPublisher': 'amqp.TaskPublisher',
+            'TaskConsumer': 'amqp.TaskConsumer',
+            'establish_connection': 'broker_connection',
+            'with_connection': 'with_default_connection',
+            'get_consumer_set': 'amqp.TaskConsumer',
         },
-        "registry": {
-            "tasks": "tasks",
+        'registry': {
+            'tasks': 'tasks',
         },
     },
-    "celery.task": {
-        "control": {
-            "broadcast": "control.broadcast",
-            "rate_limit": "control.rate_limit",
-            "time_limit": "control.time_limit",
-            "ping": "control.ping",
-            "revoke": "control.revoke",
-            "discard_all": "control.purge",
-            "inspect": "control.inspect",
+    'celery.task': {
+        'control': {
+            'broadcast': 'control.broadcast',
+            'rate_limit': 'control.rate_limit',
+            'time_limit': 'control.time_limit',
+            'ping': 'control.ping',
+            'revoke': 'control.revoke',
+            'discard_all': 'control.purge',
+            'inspect': 'control.inspect',
         },
-        "schedules": "celery.schedules",
-        "chords": "celery.canvas",
+        'schedules': 'celery.schedules',
+        'chords': 'celery.canvas',
     }
 }
 
@@ -187,7 +187,7 @@ def get_compat_module(pkg, name):
         fqdn = '.'.join([pkg.__name__, name])
         module = sys.modules[fqdn] = import_module(attrs)
         return module
-    attrs["__all__"] = attrs.keys()
+    attrs['__all__'] = attrs.keys()
     return create_module(name, dict(attrs), pkg=pkg, prepare_attr=prepare)
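
getappattr above delegates to current_app._rgetattr, which (as the celery/app/base.py diff further down shows) folds getattr over the dotted path. A standalone sketch of that lookup:

    from functools import reduce

    def rgetattr(obj, path):
        # Resolve a dotted attribute path, one getattr per segment,
        # e.g. rgetattr(app, 'amqp.TaskConsumer').
        return reduce(getattr, path.split('.'), obj)

    import os
    assert rgetattr(os, 'path.sep') == os.path.sep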
 
 

+ 17 - 23
celery/__init__.py

@@ -7,12 +7,12 @@
 
 from __future__ import absolute_import
 
-VERSION = (2, 6, 0, "rc4")
-__version__ = ".".join(map(str, VERSION[0:3])) + "".join(VERSION[3:])
-__author__ = "Ask Solem"
-__contact__ = "ask@celeryproject.org"
-__homepage__ = "http://celeryproject.org"
-__docformat__ = "restructuredtext"
+VERSION = (2, 6, 0, 'rc4')
+__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
+__author__ = 'Ask Solem'
+__contact__ = 'ask@celeryproject.org'
+__homepage__ = 'http://celeryproject.org'
+__docformat__ = 'restructuredtext'
 
 # -eof meta-
 
@@ -21,22 +21,16 @@ from .__compat__ import recreate_module
 
 old_module, new_module = recreate_module(__name__,  # pragma: no cover
     by_module={
-        "celery.app":       ["Celery", "bugreport"],
-        "celery.app.task":  ["Task"],
-        "celery.state":     ["current_app", "current_task"],
-        "celery.canvas":    ["chain", "chord", "chunks",
-                             "group", "subtask", "xmap", "xstarmap"],
-        "celery.utils":     ["uuid"],
+        'celery.app':       ['Celery', 'bugreport'],
+        'celery.app.task':  ['Task'],
+        'celery.state':     ['current_app', 'current_task'],
+        'celery.canvas':    ['chain', 'chord', 'chunks',
+                             'group', 'subtask', 'xmap', 'xstarmap'],
+        'celery.utils':     ['uuid'],
     },
-    direct={"task": "celery.task"},
-    __package__="celery",
-    __file__=__file__,
-    __path__=__path__,
-    __doc__=__doc__,
-    __version__=__version__,
-    __author__=__author__,
-    __contact__=__contact__,
-    __homepage__=__homepage__,
-    __docformat__=__docformat__,
-    VERSION=VERSION,
+    direct={'task': 'celery.task'},
+    __package__='celery', __file__=__file__,
+    __path__=__path__, __doc__=__doc__, __version__=__version__,
+    __author__=__author__, __contact__=__contact__,
+    __homepage__=__homepage__, __docformat__=__docformat__, VERSION=VERSION,
 )
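
For reference, with the tuple above the version expression evaluates like this:

    VERSION = (2, 6, 0, 'rc4')
    __version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
    # '.'.join over (2, 6, 0) gives '2.6.0'; ''.join(('rc4',)) gives 'rc4'
    assert __version__ == '2.6.0rc4'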

+ 9 - 9
celery/app/__init__.py

@@ -30,11 +30,11 @@ default_app = Proxy(lambda: state.default_app)
 #: is no active app.
 app_or_default = None
 
-#: The "default" loader is the default loader used by old applications.
-default_loader = os.environ.get("CELERY_LOADER") or "default"
+#: The 'default' loader is the default loader used by old applications.
+default_loader = os.environ.get('CELERY_LOADER') or 'default'
 
 #: Global fallback app instance.
-set_default_app(Celery("default", loader=default_loader,
+set_default_app(Celery('default', loader=default_loader,
                                   set_as_current=False,
                                   accept_magic_kwargs=True))
 
@@ -53,13 +53,13 @@ def _app_or_default_trace(app=None):  # pragma: no cover
     from traceback import print_stack
     from billiard import current_process
     if app is None:
-        if getattr(state._tls, "current_app", None):
-            print("-- RETURNING TO CURRENT APP --")  # noqa+
+        if getattr(state._tls, 'current_app', None):
+            print('-- RETURNING TO CURRENT APP --')  # noqa+
             print_stack()
             return state._tls.current_app
-        if current_process()._name == "MainProcess":
-            raise Exception("DEFAULT APP")
-        print("-- RETURNING TO DEFAULT APP --")      # noqa+
+        if current_process()._name == 'MainProcess':
+            raise Exception('DEFAULT APP')
+        print('-- RETURNING TO DEFAULT APP --')      # noqa+
         print_stack()
         return state.default_app
     return app
@@ -74,7 +74,7 @@ def disable_trace():
     global app_or_default
     app_or_default = _app_or_default
 
-if os.environ.get("CELERY_TRACE_APP"):  # pragma: no cover
+if os.environ.get('CELERY_TRACE_APP'):  # pragma: no cover
     enable_trace()
 else:
     disable_trace()
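
As the closing lines show, tracing is decided once, at import time, from the CELERY_TRACE_APP environment variable. A usage sketch under that assumption:

    import os

    # Must be set before celery.app is first imported; the module reads
    # the variable a single time during import and calls enable_trace().
    os.environ['CELERY_TRACE_APP'] = '1'
    import celery.app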

+ 5 - 5
celery/app/abstract.py

@@ -22,17 +22,17 @@ class from_config(object):
 class _configurated(type):
 
     def __new__(cls, name, bases, attrs):
-        attrs["__confopts__"] = dict((attr, spec.get_key(attr))
+        attrs['__confopts__'] = dict((attr, spec.get_key(attr))
                                           for attr, spec in attrs.iteritems()
                                               if isinstance(spec, from_config))
-        inherit_from = attrs.get("inherit_confopts", ())
+        inherit_from = attrs.get('inherit_confopts', ())
         for subcls in bases:
             try:
-                attrs["__confopts__"].update(subcls.__confopts__)
+                attrs['__confopts__'].update(subcls.__confopts__)
             except AttributeError:
                 pass
         for subcls in inherit_from:
-            attrs["__confopts__"].update(subcls.__confopts__)
+            attrs['__confopts__'].update(subcls.__confopts__)
         attrs = dict((k, v if not isinstance(v, from_config) else None)
                         for k, v in attrs.iteritems())
         return super(_configurated, cls).__new__(cls, name, bases, attrs)
@@ -41,7 +41,7 @@ class _configurated(type):
 class configurated(object):
     __metaclass__ = _configurated
 
-    def setup_defaults(self, kwargs, namespace="celery"):
+    def setup_defaults(self, kwargs, namespace='celery'):
         confopts = self.__confopts__
         app, find = self.app, self.app.conf.find_value_for_key
 

+ 32 - 32
celery/app/amqp.py

@@ -90,7 +90,7 @@ class Queues(dict):
 
     def add_compat(self, name, **options):
         # docs used to use binding_key as routing key
-        options.setdefault("routing_key", options.get("binding_key"))
+        options.setdefault('routing_key', options.get('binding_key'))
         q = self[name] = entry_to_queue(name, **options)
         return q
 
@@ -98,16 +98,16 @@ class Queues(dict):
         """Format routing table into string for log dumps."""
         active = self.consume_from
         if not active:
-            return ""
+            return ''
         info = [QUEUE_FORMAT.strip() % {
-                    "name": (name + ":").ljust(12),
-                    "exchange": q.exchange.name,
-                    "exchange_type": q.exchange.type,
-                    "routing_key": q.routing_key}
+                    'name': (name + ':').ljust(12),
+                    'exchange': q.exchange.name,
+                    'exchange_type': q.exchange.type,
+                    'routing_key': q.routing_key}
                         for name, q in sorted(active.iteritems())]
         if indent_first:
-            return textindent("\n".join(info), indent)
-        return info[0] + "\n" + textindent("\n".join(info[1:]), indent)
+            return textindent('\n'.join(info), indent)
+        return info[0] + '\n' + textindent('\n'.join(info[1:]), indent)
 
     def select_subset(self, wanted):
         """Sets :attr:`consume_from` by selecting a subset of the
@@ -141,8 +141,8 @@ class TaskProducer(Producer):
     retry_policy = None
 
     def __init__(self, channel=None, exchange=None, *args, **kwargs):
-        self.retry = kwargs.pop("retry", self.retry)
-        self.retry_policy = kwargs.pop("retry_policy",
+        self.retry = kwargs.pop('retry', self.retry)
+        self.retry_policy = kwargs.pop('retry_policy',
                                         self.retry_policy or {})
         exchange = exchange or self.exchange
         self.queues = self.app.amqp.queues  # shortcut
@@ -165,9 +165,9 @@ class TaskProducer(Producer):
         task_args = task_args or []
         task_kwargs = task_kwargs or {}
         if not isinstance(task_args, (list, tuple)):
-            raise ValueError("task args must be a list or tuple")
+            raise ValueError('task args must be a list or tuple')
         if not isinstance(task_kwargs, dict):
-            raise ValueError("task kwargs must be a dictionary")
+            raise ValueError('task kwargs must be a dictionary')
         if countdown:  # Convert countdown to ETA.
             now = now or self.app.now()
             eta = now + timedelta(seconds=countdown)
@@ -177,21 +177,21 @@ class TaskProducer(Producer):
         eta = eta and eta.isoformat()
         expires = expires and expires.isoformat()
 
-        body = {"task": task_name,
-                "id": task_id,
-                "args": task_args,
-                "kwargs": task_kwargs,
-                "retries": retries or 0,
-                "eta": eta,
-                "expires": expires,
-                "utc": self.utc,
-                "callbacks": callbacks,
-                "errbacks": errbacks}
+        body = {'task': task_name,
+                'id': task_id,
+                'args': task_args,
+                'kwargs': task_kwargs,
+                'retries': retries or 0,
+                'eta': eta,
+                'expires': expires,
+                'utc': self.utc,
+                'callbacks': callbacks,
+                'errbacks': errbacks}
         group_id = group_id or taskset_id
         if group_id:
-            body["taskset"] = group_id
+            body['taskset'] = group_id
         if chord:
-            body["chord"] = chord
+            body['chord'] = chord
 
         self.publish(body, exchange=exchange, mandatory=mandatory,
              immediate=immediate, routing_key=routing_key,
@@ -203,7 +203,7 @@ class TaskProducer(Producer):
 
         signals.task_sent.send(sender=task_name, **body)
         if event_dispatcher:
-            event_dispatcher.send("task-sent", uuid=task_id,
+            event_dispatcher.send('task-sent', uuid=task_id,
                                                name=task_name,
                                                args=repr(task_args),
                                                kwargs=repr(task_kwargs),
@@ -218,14 +218,14 @@ class TaskPublisher(TaskProducer):
     """Deprecated version of :class:`TaskProducer`."""
 
     def __init__(self, channel=None, exchange=None, *args, **kwargs):
-        self.app = app_or_default(kwargs.pop("app", self.app))
-        self.retry = kwargs.pop("retry", self.retry)
-        self.retry_policy = kwargs.pop("retry_policy",
+        self.app = app_or_default(kwargs.pop('app', self.app))
+        self.retry = kwargs.pop('retry', self.retry)
+        self.retry_policy = kwargs.pop('retry_policy',
                                         self.retry_policy or {})
         exchange = exchange or self.exchange
         if not isinstance(exchange, Exchange):
             exchange = Exchange(exchange,
-                                kwargs.pop("exchange_type", "direct"))
+                                kwargs.pop('exchange_type', 'direct'))
         self.queues = self.app.amqp.queues  # shortcut
         super(TaskPublisher, self).__init__(channel, exchange, *args, **kwargs)
 
@@ -267,7 +267,7 @@ class AMQP(object):
     def Router(self, queues=None, create_missing=None):
         """Returns the current task router."""
         return _routes.Router(self.routes, queues or self.queues,
-                              self.app.either("CELERY_CREATE_MISSING_QUEUES",
+                              self.app.either('CELERY_CREATE_MISSING_QUEUES',
                                               create_missing), app=self.app)
 
     @cached_property
@@ -275,7 +275,7 @@ class AMQP(object):
         """Return consumer configured to consume from the queues
         we are configured for (``app.amqp.queues.consume_from``)."""
         return self.app.subclass_with_self(TaskConsumer,
-                                           reverse="amqp.TaskConsumer")
+                                           reverse='amqp.TaskConsumer')
     get_task_consumer = TaskConsumer  # XXX compat
 
     @cached_property
@@ -287,7 +287,7 @@ class AMQP(object):
         """
         conf = self.app.conf
         return self.app.subclass_with_self(TaskProducer,
-                reverse="amqp.TaskProducer",
+                reverse='amqp.TaskProducer',
                 exchange=self.default_exchange,
                 routing_key=conf.CELERY_DEFAULT_ROUTING_KEY,
                 serializer=conf.CELERY_TASK_SERIALIZER,
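
The countdown handling in TaskProducer above turns a relative delay into an absolute ISO-8601 ETA before the message body is built; the same conversion in isolation:

    from datetime import datetime, timedelta

    countdown = 30  # seconds from now
    now = datetime.utcnow()
    eta = (now + timedelta(seconds=countdown)).isoformat()
    # eta is now an absolute timestamp string such as '2012-06-01T12:00:30'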

+ 3 - 3
celery/app/annotations.py

@@ -15,8 +15,8 @@ from __future__ import absolute_import
 from celery.utils.functional import firstmethod, mpromise
 from celery.utils.imports import instantiate
 
-_first_match = firstmethod("annotate")
-_first_match_any = firstmethod("annotate_any")
+_first_match = firstmethod('annotate')
+_first_match_any = firstmethod('annotate_any')
 
 
 def resolve_all(anno, task):
@@ -27,7 +27,7 @@ class MapAnnotation(dict):
 
     def annotate_any(self):
         try:
-            return dict(self["*"])
+            return dict(self['*'])
         except KeyError:
             pass
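
The '*' key that annotate_any looks up acts as a wildcard matching every task. A configuration sketch (the concrete task name is hypothetical):

    CELERY_ANNOTATIONS = {
        '*': {'rate_limit': '10/s'},           # wildcard: applies to all tasks
        'tasks.add': {'rate_limit': '100/m'},  # hypothetical task name
    }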
 

+ 35 - 35
celery/app/base.py

@@ -47,12 +47,12 @@ class Celery(object):
     SYSTEM = platforms.SYSTEM
     IS_OSX, IS_WINDOWS = platforms.IS_OSX, platforms.IS_WINDOWS
 
-    amqp_cls = "celery.app.amqp:AMQP"
+    amqp_cls = 'celery.app.amqp:AMQP'
     backend_cls = None
-    events_cls = "celery.events:Events"
-    loader_cls = "celery.loaders.app:AppLoader"
-    log_cls = "celery.app.log:Logging"
-    control_cls = "celery.app.control:Control"
+    events_cls = 'celery.events:Events'
+    loader_cls = 'celery.loaders.app:AppLoader'
+    log_cls = 'celery.app.log:Logging'
+    control_cls = 'celery.app.control:Control'
     registry_cls = TaskRegistry
     _pool = None
 
@@ -82,9 +82,9 @@ class Celery(object):
         # simplify pickling of the app object.
         self._preconf = {}
         if broker:
-            self._preconf["BROKER_URL"] = broker
+            self._preconf['BROKER_URL'] = broker
         if include:
-            self._preconf["CELERY_IMPORTS"] = include
+            self._preconf['CELERY_IMPORTS'] = include
 
         if self.set_as_current:
             self.set_current()
@@ -98,11 +98,11 @@ class Celery(object):
         pass
 
     def start(self, argv=None):
-        return instantiate("celery.bin.celery:CeleryCommand", app=self) \
+        return instantiate('celery.bin.celery:CeleryCommand', app=self) \
                     .execute_from_commandline(argv)
 
     def worker_main(self, argv=None):
-        return instantiate("celery.bin.celeryd:WorkerCommand", app=self) \
+        return instantiate('celery.bin.celeryd:WorkerCommand', app=self) \
                     .execute_from_commandline(argv)
 
     def task(self, *args, **opts):
@@ -135,14 +135,14 @@ class Celery(object):
         return inner_create_task_cls(**opts)
 
     def _task_from_fun(self, fun, **options):
-        base = options.pop("base", None) or self.Task
+        base = options.pop('base', None) or self.Task
 
         T = type(fun.__name__, (base, ), dict({
-                "app": self,
-                "accept_magic_kwargs": False,
-                "run": staticmethod(fun),
-                "__doc__": fun.__doc__,
-                "__module__": fun.__module__}, **options))()
+                'app': self,
+                'accept_magic_kwargs': False,
+                'run': staticmethod(fun),
+                '__doc__': fun.__doc__,
+                '__module__': fun.__module__}, **options))()
         task = self._tasks[T.name]  # return global instance.
         task.bind(self)
         return task
@@ -167,7 +167,7 @@ class Celery(object):
         del(self.conf)
         return self.loader.config_from_envvar(variable_name, silent=silent)
 
-    def config_from_cmdline(self, argv, namespace="celery"):
+    def config_from_cmdline(self, argv, namespace='celery'):
         self.conf.update(self.loader.cmdline_config_parser(argv, namespace))
 
     def send_task(self, name, args=None, kwargs=None, countdown=None,
@@ -175,11 +175,11 @@ class Celery(object):
             result_cls=None, expires=None, queues=None, **options):
         if self.conf.CELERY_ALWAYS_EAGER:  # pragma: no cover
             warnings.warn(AlwaysEagerIgnored(
-                "CELERY_ALWAYS_EAGER has no effect on send_task"))
+                'CELERY_ALWAYS_EAGER has no effect on send_task'))
 
         result_cls = result_cls or self.AsyncResult
         router = self.amqp.Router(queues)
-        options.setdefault("compression",
+        options.setdefault('compression',
                            self.conf.CELERY_MESSAGE_COMPRESSION)
         options = router.route(options, name, args, kwargs)
         with self.default_producer(publisher) as producer:
@@ -200,10 +200,10 @@ class Celery(object):
                     virtual_host or conf.BROKER_VHOST,
                     port or conf.BROKER_PORT,
                     transport=transport or conf.BROKER_TRANSPORT,
-                    insist=self.either("BROKER_INSIST", insist),
-                    ssl=self.either("BROKER_USE_SSL", ssl),
+                    insist=self.either('BROKER_INSIST', insist),
+                    ssl=self.either('BROKER_USE_SSL', ssl),
                     connect_timeout=self.either(
-                                "BROKER_CONNECTION_TIMEOUT", connect_timeout),
+                                'BROKER_CONNECTION_TIMEOUT', connect_timeout),
                     transport_options=dict(conf.BROKER_TRANSPORT_OPTIONS,
                                            **transport_options or {}))
 
@@ -238,7 +238,7 @@ class Celery(object):
         """
         @wraps(fun)
         def _inner(*args, **kwargs):
-            connection = kwargs.pop("connection", None)
+            connection = kwargs.pop('connection', None)
             with self.default_connection(connection) as c:
                 return fun(*args, **dict(kwargs, connection=c))
         return _inner
@@ -297,10 +297,10 @@ class Celery(object):
     def create_task_cls(self):
         """Creates a base task class using default configuration
         taken from this app."""
-        return self.subclass_with_self("celery.app.task:Task", name="Task",
-                                       attribute="_app", abstract=True)
+        return self.subclass_with_self('celery.app.task:Task', name='Task',
+                                       attribute='_app', abstract=True)
 
-    def subclass_with_self(self, Class, name=None, attribute="app",
+    def subclass_with_self(self, Class, name=None, attribute='app',
             reverse=None, **kw):
         """Subclass an app-compatible class by setting its app attribute
         to be this app instance.
@@ -312,7 +312,7 @@ class Celery(object):
         :param Class: The app-compatible class to subclass.
         :keyword name: Custom name for the target class.
         :keyword attribute: Name of the attribute holding the app,
-                            default is "app".
+                            default is 'app'.
 
         """
         Class = symbol_by_name(Class)
@@ -330,8 +330,8 @@ class Celery(object):
         return reduce(getattr, [self] + path.split('.'))
 
     def __repr__(self):
-        return "<%s %s:0x%x>" % (self.__class__.__name__,
-                                 self.main or "__main__", id(self), )
+        return '<%s %s:0x%x>' % (self.__class__.__name__,
+                                 self.main or '__main__', id(self), )
 
     def __reduce__(self):
         # Reduce only pickles the configuration changes,
@@ -347,19 +347,19 @@ class Celery(object):
 
     @cached_property
     def Worker(self):
-        return self.subclass_with_self("celery.apps.worker:Worker")
+        return self.subclass_with_self('celery.apps.worker:Worker')
 
     @cached_property
     def WorkController(self, **kwargs):
-        return self.subclass_with_self("celery.worker:WorkController")
+        return self.subclass_with_self('celery.worker:WorkController')
 
     @cached_property
     def Beat(self, **kwargs):
-        return self.subclass_with_self("celery.apps.beat:Beat")
+        return self.subclass_with_self('celery.apps.beat:Beat')
 
     @cached_property
     def TaskSet(self):
-        return self.subclass_with_self("celery.task.sets:TaskSet")
+        return self.subclass_with_self('celery.task.sets:TaskSet')
 
     @cached_property
     def Task(self):
@@ -371,15 +371,15 @@ class Celery(object):
 
     @cached_property
     def AsyncResult(self):
-        return self.subclass_with_self("celery.result:AsyncResult")
+        return self.subclass_with_self('celery.result:AsyncResult')
 
     @cached_property
     def GroupResult(self):
-        return self.subclass_with_self("celery.result:GroupResult")
+        return self.subclass_with_self('celery.result:GroupResult')
 
     @cached_property
     def TaskSetResult(self):  # XXX compat
-        return self.subclass_with_self("celery.result:TaskSetResult")
+        return self.subclass_with_self('celery.result:TaskSetResult')
 
     @property
     def pool(self):
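
Many of the cached properties above are built with subclass_with_self. A simplified sketch of the idea (not Celery's actual implementation): create a subclass with the app attribute pre-bound, so instances see this app without it being passed around.

    def subclass_with_self(app, Class, name=None, attribute='app'):
        # Bind `app` onto a new subclass of Class under `attribute`.
        return type(name or Class.__name__, (Class,), {
            attribute: app,
            '__module__': Class.__module__,
            '__doc__': Class.__doc__,
        })

    class App(object): pass
    class Task(object): app = None

    app = App()
    BoundTask = subclass_with_self(app, Task)
    assert BoundTask.app is app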

+ 28 - 28
celery/app/builtins.py

@@ -52,7 +52,7 @@ def add_backend_cleanup_task(app):
 
     """
 
-    @app.task(name="celery.backend_cleanup")
+    @app.task(name='celery.backend_cleanup')
     def backend_cleanup():
         app.backend.cleanup()
     return backend_cleanup
@@ -69,7 +69,7 @@ def add_unlock_chord_task(app):
     from celery.canvas import subtask
     from celery import result as _res
 
-    @app.task(name="celery.chord_unlock", max_retries=None)
+    @app.task(name='celery.chord_unlock', max_retries=None)
     def unlock_chord(group_id, callback, interval=1, propagate=False,
             max_retries=None, result=None):
         result = _res.GroupResult(group_id, map(_res.AsyncResult, result))
@@ -85,7 +85,7 @@ def add_unlock_chord_task(app):
 def add_map_task(app):
     from celery.canvas import subtask
 
-    @app.task(name="celery.map")
+    @app.task(name='celery.map')
     def xmap(task, it):
         task = subtask(task).type
         return list(map(task, it))
@@ -95,7 +95,7 @@ def add_map_task(app):
 def add_starmap_task(app):
     from celery.canvas import subtask
 
-    @app.task(name="celery.starmap")
+    @app.task(name='celery.starmap')
     def xstarmap(task, it):
         task = subtask(task).type
         return list(starmap(task, it))
@@ -105,7 +105,7 @@ def add_starmap_task(app):
 def add_chunk_task(app):
     from celery.canvas import chunks as _chunks
 
-    @app.task(name="celery.chunks")
+    @app.task(name='celery.chunks')
     def chunks(task, it, n):
         return _chunks.apply_chunks(task, it, n)
 
@@ -118,7 +118,7 @@ def add_group_task(app):
 
     class Group(app.Task):
         app = _app
-        name = "celery.group"
+        name = 'celery.group'
         accept_magic_kwargs = False
 
         def run(self, tasks, result, group_id):
@@ -139,15 +139,15 @@ def add_group_task(app):
 
         def prepare(self, options, tasks, **kwargs):
             r = []
-            options["group_id"] = group_id = \
-                    options.setdefault("task_id", uuid())
+            options['group_id'] = group_id = \
+                    options.setdefault('task_id', uuid())
             for task in tasks:
                 opts = task.options
-                opts["group_id"] = group_id
+                opts['group_id'] = group_id
                 try:
-                    tid = opts["task_id"]
+                    tid = opts['task_id']
                 except KeyError:
-                    tid = opts["task_id"] = uuid()
+                    tid = opts['task_id'] = uuid()
                 r.append(self.AsyncResult(tid))
             return tasks, self.app.GroupResult(group_id, r), group_id
 
@@ -172,33 +172,33 @@ def add_chain_task(app):
 
     class Chain(app.Task):
         app = _app
-        name = "celery.chain"
+        name = 'celery.chain'
         accept_magic_kwargs = False
 
         def apply_async(self, args=(), kwargs={}, **options):
             if self.app.conf.CELERY_ALWAYS_EAGER:
                 return self.apply(args, kwargs, **options)
-            options.pop("publisher", None)
-            group_id = options.pop("group_id", None)
-            chord = options.pop("chord", None)
+            options.pop('publisher', None)
+            group_id = options.pop('group_id', None)
+            chord = options.pop('chord', None)
             tasks = [maybe_subtask(t).clone(
-                        task_id=options.pop("task_id", uuid()),
+                        task_id=options.pop('task_id', uuid()),
                         **options
                     )
-                    for t in kwargs["tasks"]]
+                    for t in kwargs['tasks']]
             reduce(lambda a, b: a.link(b), tasks)
             if group_id:
                 tasks[-1].set(group_id=group_id)
             if chord:
                 tasks[-1].set(chord=chord)
             tasks[0].apply_async()
-            results = [task.type.AsyncResult(task.options["task_id"])
+            results = [task.type.AsyncResult(task.options['task_id'])
                             for task in tasks]
             reduce(lambda a, b: a.set_parent(b), reversed(results))
             return results[-1]
 
         def apply(self, args=(), kwargs={}, **options):
-            tasks = [maybe_subtask(task).clone() for task in kwargs["tasks"]]
+            tasks = [maybe_subtask(task).clone() for task in kwargs['tasks']]
             res = prev = None
             for task in tasks:
                 res = task.apply((prev.get(), ) if prev else ())
@@ -219,7 +219,7 @@ def add_chord_task(app):
 
     class Chord(app.Task):
         app = _app
-        name = "celery.chord"
+        name = 'celery.chord'
         accept_magic_kwargs = False
         ignore_result = False
 
@@ -232,11 +232,11 @@ def add_chord_task(app):
             for task in header.tasks:
                 opts = task.options
                 try:
-                    tid = opts["task_id"]
+                    tid = opts['task_id']
                 except KeyError:
-                    tid = opts["task_id"] = uuid()
-                opts["chord"] = body
-                opts["group_id"] = group_id
+                    tid = opts['task_id'] = uuid()
+                opts['chord'] = body
+                opts['group_id'] = group_id
                 r.append(app.AsyncResult(tid))
             if eager:
                 return header.apply(task_id=group_id)
@@ -250,17 +250,17 @@ def add_chord_task(app):
         def apply_async(self, args=(), kwargs={}, task_id=None, **options):
             if self.app.conf.CELERY_ALWAYS_EAGER:
                 return self.apply(args, kwargs, **options)
-            header, body = (list(kwargs["header"]),
-                            maybe_subtask(kwargs["body"]))
+            header, body = (list(kwargs['header']),
+                            maybe_subtask(kwargs['body']))
 
-            callback_id = body.options.setdefault("task_id", task_id or uuid())
+            callback_id = body.options.setdefault('task_id', task_id or uuid())
             parent = super(Chord, self).apply_async((header, body), **options)
             body_result = self.AsyncResult(callback_id)
             body_result.parent = parent
             return body_result
 
         def apply(self, args=(), kwargs={}, propagate=True, **options):
-            body = kwargs["body"]
+            body = kwargs['body']
             res = super(Chord, self).apply(args, dict(kwargs, eager=True),
                                            **options)
             return maybe_subtask(body).apply(
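
Chain.apply_async above wires consecutive tasks together with reduce(lambda a, b: a.link(b), tasks). The same fold on a toy stand-in class:

    from functools import reduce

    class Node(object):
        def __init__(self, name):
            self.name, self.callbacks = name, []
        def link(self, other):
            # Register `other` to run after self; returning it makes the
            # reduce advance one step down the chain per iteration.
            self.callbacks.append(other)
            return other

    a, b, c = Node('a'), Node('b'), Node('c')
    reduce(lambda x, y: x.link(y), [a, b, c])
    assert a.callbacks == [b] and b.callbacks == [c]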

+ 36 - 36
celery/app/control.py

@@ -52,32 +52,32 @@ class Inspect(object):
                                       timeout=self.timeout, reply=True))
 
     def report(self):
-        return self._request("report")
+        return self._request('report')
 
     def active(self, safe=False):
-        return self._request("dump_active", safe=safe)
+        return self._request('dump_active', safe=safe)
 
     def scheduled(self, safe=False):
-        return self._request("dump_schedule", safe=safe)
+        return self._request('dump_schedule', safe=safe)
 
     def reserved(self, safe=False):
-        return self._request("dump_reserved", safe=safe)
+        return self._request('dump_reserved', safe=safe)
 
     def stats(self):
-        return self._request("stats")
+        return self._request('stats')
 
     def revoked(self):
-        return self._request("dump_revoked")
+        return self._request('dump_revoked')
 
     def registered(self):
-        return self._request("dump_tasks")
+        return self._request('dump_tasks')
     registered_tasks = registered
 
     def ping(self):
-        return self._request("ping")
+        return self._request('ping')
 
     def active_queues(self):
-        return self._request("active_queues")
+        return self._request('active_queues')
 
 
 class Control(object):
@@ -85,11 +85,11 @@ class Control(object):
 
     def __init__(self, app=None):
         self.app = app_or_default(app)
-        self.mailbox = self.Mailbox("celeryd", type="fanout")
+        self.mailbox = self.Mailbox('celeryd', type='fanout')
 
     @cached_property
     def inspect(self):
-        return self.app.subclass_with_self(Inspect, reverse="control.inspect")
+        return self.app.subclass_with_self(Inspect, reverse='control.inspect')
 
     def purge(self, connection=None):
         """Discard all waiting tasks.
@@ -105,7 +105,7 @@ class Control(object):
     discard_all = purge
 
     def revoke(self, task_id, destination=None, terminate=False,
-            signal="SIGTERM", **kwargs):
+            signal='SIGTERM', **kwargs):
         """Tell all (or specific) workers to revoke a task by id.
 
         If a task is revoked, the workers will ignore the task and
@@ -120,10 +120,10 @@ class Control(object):
         See :meth:`broadcast` for supported keyword arguments.
 
         """
-        return self.broadcast("revoke", destination=destination,
-                              arguments={"task_id": task_id,
-                                         "terminate": terminate,
-                                         "signal": signal}, **kwargs)
+        return self.broadcast('revoke', destination=destination,
+                              arguments={'task_id': task_id,
+                                         'terminate': terminate,
+                                         'signal': signal}, **kwargs)
 
     def ping(self, destination=None, timeout=1, **kwargs):
         """Ping all (or specific) workers.
@@ -133,7 +133,7 @@ class Control(object):
         See :meth:`broadcast` for supported keyword arguments.
 
         """
-        return self.broadcast("ping", reply=True, destination=destination,
+        return self.broadcast('ping', reply=True, destination=destination,
                               timeout=timeout, **kwargs)
 
     def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
@@ -142,19 +142,19 @@ class Control(object):
 
         :param task_name: Name of task to change rate limit for.
         :param rate_limit: The rate limit as tasks per second, or a rate limit
-            string (`"100/m"`, etc.
+            string (`'100/m'`, etc.
             see :attr:`celery.task.base.Task.rate_limit` for
             more information).
 
         See :meth:`broadcast` for supported keyword arguments.
 
         """
-        return self.broadcast("rate_limit", destination=destination,
-                              arguments={"task_name": task_name,
-                                         "rate_limit": rate_limit},
+        return self.broadcast('rate_limit', destination=destination,
+                              arguments={'task_name': task_name,
+                                         'rate_limit': rate_limit},
                               **kwargs)
 
-    def add_consumer(self, queue, exchange=None, exchange_type="direct",
+    def add_consumer(self, queue, exchange=None, exchange_type='direct',
             routing_key=None, **options):
         """Tell all (or specific) workers to start consuming from a new queue.
 
@@ -169,17 +169,17 @@ class Control(object):
 
         :param queue: Name of queue to start consuming from.
         :keyword exchange: Optional name of exchange.
-        :keyword exchange_type: Type of exchange (defaults to "direct")
+        :keyword exchange_type: Type of exchange (defaults to 'direct')
             command to, when empty broadcast to all workers.
         :keyword routing_key: Optional routing key.
 
         See :meth:`broadcast` for supported keyword arguments.
 
         """
-        return self.broadcast("add_consumer",
-                arguments={"queue": queue, "exchange": exchange,
-                           "exchange_type": exchange_type,
-                           "routing_key": routing_key}, **options)
+        return self.broadcast('add_consumer',
+                arguments={'queue': queue, 'exchange': exchange,
+                           'exchange_type': exchange_type,
+                           'routing_key': routing_key}, **options)
 
     def cancel_consumer(self, queue, **kwargs):
         """Tell all (or specific) workers to stop consuming from ``queue``.
@@ -187,8 +187,8 @@ class Control(object):
         Supports the same keyword arguments as :meth:`broadcast`.
 
         """
-        return self.broadcast("cancel_consumer",
-                arguments={"queue": queue}, **kwargs)
+        return self.broadcast('cancel_consumer',
+                arguments={'queue': queue}, **kwargs)
 
 
     def time_limit(self, task_name, soft=None, hard=None, **kwargs):
@@ -202,17 +202,17 @@ class Control(object):
         Any additional keyword arguments are passed on to :meth:`broadcast`.
 
         """
-        return self.broadcast("time_limit",
-                              arguments={"task_name": task_name,
-                                         "hard": hard, "soft": soft}, **kwargs)
+        return self.broadcast('time_limit',
+                              arguments={'task_name': task_name,
+                                         'hard': hard, 'soft': soft}, **kwargs)
 
     def enable_events(self, destination=None, **kwargs):
         """Tell all (or specific) workers to enable events."""
-        return self.broadcast("enable_events", {}, destination, **kwargs)
+        return self.broadcast('enable_events', {}, destination, **kwargs)
 
     def disable_events(self, destination=None, **kwargs):
         """Tell all (or specific) workers to enable events."""
-        return self.broadcast("disable_events", {}, destination, **kwargs)
+        return self.broadcast('disable_events', {}, destination, **kwargs)
 
     def pool_grow(self, n=1, destination=None, **kwargs):
         """Tell all (or specific) workers to grow the pool by ``n``.
@@ -220,7 +220,7 @@ class Control(object):
         Supports the same arguments as :meth:`broadcast`.
 
         """
-        return self.broadcast("pool_grow", {}, destination, **kwargs)
+        return self.broadcast('pool_grow', {}, destination, **kwargs)
 
     def pool_shrink(self, n=1, destination=None, **kwargs):
         """Tell all (or specific) workers to shrink the pool by ``n``.
@@ -228,7 +228,7 @@ class Control(object):
         Supports the same arguments as :meth:`broadcast`.
 
         """
-        return self.broadcast("pool_shrink", {}, destination, **kwargs)
+        return self.broadcast('pool_shrink', {}, destination, **kwargs)
 
     def broadcast(self, command, arguments=None, destination=None,
             connection=None, reply=False, timeout=1, limit=None,
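
All of the helpers above funnel into broadcast(). Typical calls through the public API (the app and task id are hypothetical):

    from celery import Celery

    app = Celery('proj')
    task_id = '...'  # id of a previously published task

    # Sends broadcast('rate_limit', arguments={...}) as defined above.
    app.control.rate_limit('tasks.add', '100/m')

    # Sends broadcast('revoke', ...); terminate also kills a running task.
    app.control.revoke(task_id, terminate=True, signal='SIGTERM')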

+ 149 - 149
celery/app/defaults.py

@@ -16,17 +16,17 @@ from datetime import timedelta
 from celery.utils import strtobool
 from celery.utils.functional import memoize
 
-is_jython = sys.platform.startswith("java")
-is_pypy = hasattr(sys, "pypy_version_info")
+is_jython = sys.platform.startswith('java')
+is_pypy = hasattr(sys, 'pypy_version_info')
 
-DEFAULT_POOL = "processes"
+DEFAULT_POOL = 'processes'
 if is_jython:
-    DEFAULT_POOL = "threads"
+    DEFAULT_POOL = 'threads'
 elif is_pypy:
     if sys.pypy_version_info[0:3] < (1, 5, 0):
-        DEFAULT_POOL = "solo"
+        DEFAULT_POOL = 'solo'
     else:
-        DEFAULT_POOL = "processes"
+        DEFAULT_POOL = 'processes'
 
 
 DEFAULT_PROCESS_LOG_FMT = """
@@ -36,9 +36,9 @@ DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
 DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
 %(task_name)s[%(task_id)s]: %(message)s"""
 
-_BROKER_OLD = {"deprecate_by": "2.5", "remove_by": "3.0", "alt": "BROKER_URL"}
-_REDIS_OLD = {"deprecate_by": "2.5", "remove_by": "3.0",
-              "alt": "URL form of CELERY_RESULT_BACKEND"}
+_BROKER_OLD = {'deprecate_by': '2.5', 'remove_by': '3.0', 'alt': 'BROKER_URL'}
+_REDIS_OLD = {'deprecate_by': '2.5', 'remove_by': '3.0',
+              'alt': 'URL form of CELERY_RESULT_BACKEND'}
 
 
 class Option(object):
@@ -50,7 +50,7 @@ class Option(object):
 
     def __init__(self, default=None, *args, **kwargs):
         self.default = default
-        self.type = kwargs.get("type") or "string"
+        self.type = kwargs.get('type') or 'string'
         for attr, value in kwargs.iteritems():
             setattr(self, attr, value)
 
@@ -59,153 +59,153 @@ class Option(object):
 
 
 NAMESPACES = {
-    "BROKER": {
-        "URL": Option(None, type="string"),
-        "CONNECTION_TIMEOUT": Option(4, type="float"),
-        "CONNECTION_RETRY": Option(True, type="bool"),
-        "CONNECTION_MAX_RETRIES": Option(100, type="int"),
-        "POOL_LIMIT": Option(10, type="int"),
-        "INSIST": Option(False, type="bool",
-                         deprecate_by="2.4", remove_by="3.0"),
-        "USE_SSL": Option(False, type="bool"),
-        "TRANSPORT": Option(type="string"),
-        "TRANSPORT_OPTIONS": Option({}, type="dict"),
-        "HOST": Option(type="string", **_BROKER_OLD),
-        "PORT": Option(type="int", **_BROKER_OLD),
-        "USER": Option(type="string", **_BROKER_OLD),
-        "PASSWORD": Option(type="string", **_BROKER_OLD),
-        "VHOST": Option(type="string", **_BROKER_OLD),
+    'BROKER': {
+        'URL': Option(None, type='string'),
+        'CONNECTION_TIMEOUT': Option(4, type='float'),
+        'CONNECTION_RETRY': Option(True, type='bool'),
+        'CONNECTION_MAX_RETRIES': Option(100, type='int'),
+        'POOL_LIMIT': Option(10, type='int'),
+        'INSIST': Option(False, type='bool',
+                         deprecate_by='2.4', remove_by='3.0'),
+        'USE_SSL': Option(False, type='bool'),
+        'TRANSPORT': Option(type='string'),
+        'TRANSPORT_OPTIONS': Option({}, type='dict'),
+        'HOST': Option(type='string', **_BROKER_OLD),
+        'PORT': Option(type='int', **_BROKER_OLD),
+        'USER': Option(type='string', **_BROKER_OLD),
+        'PASSWORD': Option(type='string', **_BROKER_OLD),
+        'VHOST': Option(type='string', **_BROKER_OLD),
     },
-    "CASSANDRA": {
-        "COLUMN_FAMILY": Option(type="string"),
-        "DETAILED_MODE": Option(False, type="bool"),
-        "KEYSPACE": Option(type="string"),
-        "READ_CONSISTENCY": Option(type="string"),
-        "SERVERS": Option(type="list"),
-        "WRITE_CONSISTENCY": Option(type="string"),
+    'CASSANDRA': {
+        'COLUMN_FAMILY': Option(type='string'),
+        'DETAILED_MODE': Option(False, type='bool'),
+        'KEYSPACE': Option(type='string'),
+        'READ_CONSISTENCY': Option(type='string'),
+        'SERVERS': Option(type='list'),
+        'WRITE_CONSISTENCY': Option(type='string'),
     },
-    "CELERY": {
-        "ACKS_LATE": Option(False, type="bool"),
-        "ALWAYS_EAGER": Option(False, type="bool"),
-        "AMQP_TASK_RESULT_EXPIRES": Option(type="float",
-                deprecate_by="2.5", remove_by="3.0",
-                alt="CELERY_TASK_RESULT_EXPIRES"),
-        "AMQP_TASK_RESULT_CONNECTION_MAX": Option(1, type="int",
-                remove_by="2.5", alt="BROKER_POOL_LIMIT"),
-        "ANNOTATIONS": Option(type="any"),
-        "BROADCAST_QUEUE": Option("celeryctl"),
-        "BROADCAST_EXCHANGE": Option("celeryctl"),
-        "BROADCAST_EXCHANGE_TYPE": Option("fanout"),
-        "CACHE_BACKEND": Option(),
-        "CACHE_BACKEND_OPTIONS": Option({}, type="dict"),
-        "CREATE_MISSING_QUEUES": Option(True, type="bool"),
-        "DEFAULT_RATE_LIMIT": Option(type="string"),
-        "DISABLE_RATE_LIMITS": Option(False, type="bool"),
-        "DEFAULT_ROUTING_KEY": Option("celery"),
-        "DEFAULT_QUEUE": Option("celery"),
-        "DEFAULT_EXCHANGE": Option("celery"),
-        "DEFAULT_EXCHANGE_TYPE": Option("direct"),
-        "DEFAULT_DELIVERY_MODE": Option(2, type="string"),
-        "EAGER_PROPAGATES_EXCEPTIONS": Option(False, type="bool"),
-        "ENABLE_UTC": Option(False, type="bool"),
-        "EVENT_SERIALIZER": Option("json"),
-        "IMPORTS": Option((), type="tuple"),
-        "INCLUDE": Option((), type="tuple"),
-        "IGNORE_RESULT": Option(False, type="bool"),
-        "MAX_CACHED_RESULTS": Option(5000, type="int"),
-        "MESSAGE_COMPRESSION": Option(type="string"),
-        "MONGODB_BACKEND_SETTINGS": Option(type="dict"),
-        "REDIS_HOST": Option(type="string", **_REDIS_OLD),
-        "REDIS_PORT": Option(type="int", **_REDIS_OLD),
-        "REDIS_DB": Option(type="int", **_REDIS_OLD),
-        "REDIS_PASSWORD": Option(type="string", **_REDIS_OLD),
-        "REDIS_MAX_CONNECTIONS": Option(type="int"),
-        "RESULT_BACKEND": Option(type="string"),
-        "RESULT_DB_SHORT_LIVED_SESSIONS": Option(False, type="bool"),
-        "RESULT_DBURI": Option(),
-        "RESULT_ENGINE_OPTIONS": Option(type="dict"),
-        "RESULT_EXCHANGE": Option("celeryresults"),
-        "RESULT_EXCHANGE_TYPE": Option("direct"),
-        "RESULT_SERIALIZER": Option("pickle"),
-        "RESULT_PERSISTENT": Option(False, type="bool"),
-        "ROUTES": Option(type="any"),
-        "SEND_EVENTS": Option(False, type="bool"),
-        "SEND_TASK_ERROR_EMAILS": Option(False, type="bool"),
-        "SEND_TASK_SENT_EVENT": Option(False, type="bool"),
-        "STORE_ERRORS_EVEN_IF_IGNORED": Option(False, type="bool"),
-        "TASK_ERROR_WHITELIST": Option((), type="tuple",
-            deprecate_by="2.5", remove_by="3.0"),
-        "TASK_PUBLISH_RETRY": Option(True, type="bool"),
-        "TASK_PUBLISH_RETRY_POLICY": Option({
-                "max_retries": 100,
-                "interval_start": 0,
-                "interval_max": 1,
-                "interval_step": 0.2}, type="dict"),
-        "TASK_RESULT_EXPIRES": Option(timedelta(days=1), type="float"),
-        "TASK_SERIALIZER": Option("pickle"),
-        "TIMEZONE": Option(type="string"),
-        "TRACK_STARTED": Option(False, type="bool"),
-        "REDIRECT_STDOUTS": Option(True, type="bool"),
-        "REDIRECT_STDOUTS_LEVEL": Option("WARNING"),
-        "QUEUES": Option(type="dict"),
-        "SECURITY_KEY": Option(type="string"),
-        "SECURITY_CERTIFICATE": Option(type="string"),
-        "SECURITY_CERT_STORE": Option(type="string"),
+    'CELERY': {
+        'ACKS_LATE': Option(False, type='bool'),
+        'ALWAYS_EAGER': Option(False, type='bool'),
+        'AMQP_TASK_RESULT_EXPIRES': Option(type='float',
+                deprecate_by='2.5', remove_by='3.0',
+                alt='CELERY_TASK_RESULT_EXPIRES'),
+        'AMQP_TASK_RESULT_CONNECTION_MAX': Option(1, type='int',
+                remove_by='2.5', alt='BROKER_POOL_LIMIT'),
+        'ANNOTATIONS': Option(type='any'),
+        'BROADCAST_QUEUE': Option('celeryctl'),
+        'BROADCAST_EXCHANGE': Option('celeryctl'),
+        'BROADCAST_EXCHANGE_TYPE': Option('fanout'),
+        'CACHE_BACKEND': Option(),
+        'CACHE_BACKEND_OPTIONS': Option({}, type='dict'),
+        'CREATE_MISSING_QUEUES': Option(True, type='bool'),
+        'DEFAULT_RATE_LIMIT': Option(type='string'),
+        'DISABLE_RATE_LIMITS': Option(False, type='bool'),
+        'DEFAULT_ROUTING_KEY': Option('celery'),
+        'DEFAULT_QUEUE': Option('celery'),
+        'DEFAULT_EXCHANGE': Option('celery'),
+        'DEFAULT_EXCHANGE_TYPE': Option('direct'),
+        'DEFAULT_DELIVERY_MODE': Option(2, type='string'),
+        'EAGER_PROPAGATES_EXCEPTIONS': Option(False, type='bool'),
+        'ENABLE_UTC': Option(False, type='bool'),
+        'EVENT_SERIALIZER': Option('json'),
+        'IMPORTS': Option((), type='tuple'),
+        'INCLUDE': Option((), type='tuple'),
+        'IGNORE_RESULT': Option(False, type='bool'),
+        'MAX_CACHED_RESULTS': Option(5000, type='int'),
+        'MESSAGE_COMPRESSION': Option(type='string'),
+        'MONGODB_BACKEND_SETTINGS': Option(type='dict'),
+        'REDIS_HOST': Option(type='string', **_REDIS_OLD),
+        'REDIS_PORT': Option(type='int', **_REDIS_OLD),
+        'REDIS_DB': Option(type='int', **_REDIS_OLD),
+        'REDIS_PASSWORD': Option(type='string', **_REDIS_OLD),
+        'REDIS_MAX_CONNECTIONS': Option(type='int'),
+        'RESULT_BACKEND': Option(type='string'),
+        'RESULT_DB_SHORT_LIVED_SESSIONS': Option(False, type='bool'),
+        'RESULT_DBURI': Option(),
+        'RESULT_ENGINE_OPTIONS': Option(type='dict'),
+        'RESULT_EXCHANGE': Option('celeryresults'),
+        'RESULT_EXCHANGE_TYPE': Option('direct'),
+        'RESULT_SERIALIZER': Option('pickle'),
+        'RESULT_PERSISTENT': Option(False, type='bool'),
+        'ROUTES': Option(type='any'),
+        'SEND_EVENTS': Option(False, type='bool'),
+        'SEND_TASK_ERROR_EMAILS': Option(False, type='bool'),
+        'SEND_TASK_SENT_EVENT': Option(False, type='bool'),
+        'STORE_ERRORS_EVEN_IF_IGNORED': Option(False, type='bool'),
+        'TASK_ERROR_WHITELIST': Option((), type='tuple',
+            deprecate_by='2.5', remove_by='3.0'),
+        'TASK_PUBLISH_RETRY': Option(True, type='bool'),
+        'TASK_PUBLISH_RETRY_POLICY': Option({
+                'max_retries': 100,
+                'interval_start': 0,
+                'interval_max': 1,
+                'interval_step': 0.2}, type='dict'),
+        'TASK_RESULT_EXPIRES': Option(timedelta(days=1), type='float'),
+        'TASK_SERIALIZER': Option('pickle'),
+        'TIMEZONE': Option(type='string'),
+        'TRACK_STARTED': Option(False, type='bool'),
+        'REDIRECT_STDOUTS': Option(True, type='bool'),
+        'REDIRECT_STDOUTS_LEVEL': Option('WARNING'),
+        'QUEUES': Option(type='dict'),
+        'SECURITY_KEY': Option(type='string'),
+        'SECURITY_CERTIFICATE': Option(type='string'),
+        'SECURITY_CERT_STORE': Option(type='string'),
     },
-    "CELERYD": {
-        "AUTOSCALER": Option("celery.worker.autoscale.Autoscaler"),
-        "AUTORELOADER": Option("celery.worker.autoreload.Autoreloader"),
-        "BOOT_STEPS": Option((), type="tuple"),
-        "CONCURRENCY": Option(0, type="int"),
-        "TIMER": Option(type="string"),
-        "TIMER_PRECISION": Option(1.0, type="float"),
-        "FORCE_EXECV": Option(True, type="bool"),
-        "HIJACK_ROOT_LOGGER": Option(True, type="bool"),
-        "CONSUMER": Option(type="string"),
-        "LOG_FORMAT": Option(DEFAULT_PROCESS_LOG_FMT),
-        "LOG_COLOR": Option(type="bool"),
-        "LOG_LEVEL": Option("WARN", deprecate_by="2.4", remove_by="3.0",
-                            alt="--loglevel argument"),
-        "LOG_FILE": Option(deprecate_by="2.4", remove_by="3.0"),
-        "MEDIATOR": Option("celery.worker.mediator.Mediator"),
-        "MAX_TASKS_PER_CHILD": Option(type="int"),
-        "POOL": Option(DEFAULT_POOL),
-        "POOL_PUTLOCKS": Option(True, type="bool"),
-        "PREFETCH_MULTIPLIER": Option(4, type="int"),
-        "STATE_DB": Option(),
-        "TASK_LOG_FORMAT": Option(DEFAULT_TASK_LOG_FMT),
-        "TASK_SOFT_TIME_LIMIT": Option(type="float"),
-        "TASK_TIME_LIMIT": Option(type="float"),
-        "WORKER_LOST_WAIT": Option(10.0, type="float")
+    'CELERYD': {
+        'AUTOSCALER': Option('celery.worker.autoscale.Autoscaler'),
+        'AUTORELOADER': Option('celery.worker.autoreload.Autoreloader'),
+        'BOOT_STEPS': Option((), type='tuple'),
+        'CONCURRENCY': Option(0, type='int'),
+        'TIMER': Option(type='string'),
+        'TIMER_PRECISION': Option(1.0, type='float'),
+        'FORCE_EXECV': Option(True, type='bool'),
+        'HIJACK_ROOT_LOGGER': Option(True, type='bool'),
+        'CONSUMER': Option(type='string'),
+        'LOG_FORMAT': Option(DEFAULT_PROCESS_LOG_FMT),
+        'LOG_COLOR': Option(type='bool'),
+        'LOG_LEVEL': Option('WARN', deprecate_by='2.4', remove_by='3.0',
+                            alt='--loglevel argument'),
+        'LOG_FILE': Option(deprecate_by='2.4', remove_by='3.0'),
+        'MEDIATOR': Option('celery.worker.mediator.Mediator'),
+        'MAX_TASKS_PER_CHILD': Option(type='int'),
+        'POOL': Option(DEFAULT_POOL),
+        'POOL_PUTLOCKS': Option(True, type='bool'),
+        'PREFETCH_MULTIPLIER': Option(4, type='int'),
+        'STATE_DB': Option(),
+        'TASK_LOG_FORMAT': Option(DEFAULT_TASK_LOG_FMT),
+        'TASK_SOFT_TIME_LIMIT': Option(type='float'),
+        'TASK_TIME_LIMIT': Option(type='float'),
+        'WORKER_LOST_WAIT': Option(10.0, type='float')
     },
-    "CELERYBEAT": {
-        "SCHEDULE": Option({}, type="dict"),
-        "SCHEDULER": Option("celery.beat.PersistentScheduler"),
-        "SCHEDULE_FILENAME": Option("celerybeat-schedule"),
-        "MAX_LOOP_INTERVAL": Option(0, type="float"),
-        "LOG_LEVEL": Option("INFO", deprecate_by="2.4", remove_by="3.0"),
-        "LOG_FILE": Option(deprecate_by="2.4", remove_by="3.0"),
+    'CELERYBEAT': {
+        'SCHEDULE': Option({}, type='dict'),
+        'SCHEDULER': Option('celery.beat.PersistentScheduler'),
+        'SCHEDULE_FILENAME': Option('celerybeat-schedule'),
+        'MAX_LOOP_INTERVAL': Option(0, type='float'),
+        'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='3.0'),
+        'LOG_FILE': Option(deprecate_by='2.4', remove_by='3.0'),
     },
-    "CELERYMON": {
-        "LOG_LEVEL": Option("INFO", deprecate_by="2.4", remove_by="3.0"),
-        "LOG_FILE": Option(deprecate_by="2.4", remove_by="3.0"),
-        "LOG_FORMAT": Option(DEFAULT_LOG_FMT),
+    'CELERYMON': {
+        'LOG_LEVEL': Option('INFO', deprecate_by='2.4', remove_by='3.0'),
+        'LOG_FILE': Option(deprecate_by='2.4', remove_by='3.0'),
+        'LOG_FORMAT': Option(DEFAULT_LOG_FMT),
     },
-    "EMAIL": {
-        "HOST": Option("localhost"),
-        "PORT": Option(25, type="int"),
-        "HOST_USER": Option(),
-        "HOST_PASSWORD": Option(),
-        "TIMEOUT": Option(2, type="float"),
-        "USE_SSL": Option(False, type="bool"),
-        "USE_TLS": Option(False, type="bool"),
+    'EMAIL': {
+        'HOST': Option('localhost'),
+        'PORT': Option(25, type='int'),
+        'HOST_USER': Option(),
+        'HOST_PASSWORD': Option(),
+        'TIMEOUT': Option(2, type='float'),
+        'USE_SSL': Option(False, type='bool'),
+        'USE_TLS': Option(False, type='bool'),
     },
-    "SERVER_EMAIL": Option("celery@localhost"),
-    "ADMINS": Option((), type="tuple"),
+    'SERVER_EMAIL': Option('celery@localhost'),
+    'ADMINS': Option((), type='tuple'),
 }
 
 
-def flatten(d, ns=""):
+def flatten(d, ns=''):
     stack = deque([(ns, d)])
     while stack:
         name, space = stack.popleft()
@@ -221,7 +221,7 @@ def find_deprecated_settings(source):
     from celery.utils import warn_deprecated
     for name, opt in flatten(NAMESPACES):
         if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
-            warn_deprecated(description="The %r setting" % (name, ),
+            warn_deprecated(description='The %r setting' % (name, ),
                             deprecation=opt.deprecate_by,
                             removal=opt.remove_by,
                             alternative=opt.alt)
@@ -229,7 +229,7 @@ def find_deprecated_settings(source):
 
 
 @memoize(maxsize=None)
-def find(name, namespace="celery"):
+def find(name, namespace='celery'):
     # - Try specified namespace first.
     namespace = namespace.upper()
     try:

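Reviewer note: the hunk above ends inside find(), but the surrounding machinery is simple. NAMESPACES is a nested dict of Option objects, and flatten() turns it into (flat_name, Option) pairs that find() and find_deprecated_settings() walk. A minimal standalone sketch of that walk, using a bare stand-in Option (not the real class):

    from collections import deque

    class Option(object):                      # bare stand-in for defaults.Option
        def __init__(self, default=None, **kwargs):
            self.default = default

    NAMESPACES = {'CELERYD': {'CONCURRENCY': Option(0)},
                  'SERVER_EMAIL': Option('celery@localhost')}

    def flatten(d, ns=''):
        stack = deque([(ns, d)])
        while stack:
            name, space = stack.popleft()
            for key, value in space.items():
                if isinstance(value, dict):    # nested namespace: walk later
                    stack.append((name + key + '_', value))
                else:                          # leaf Option: yield the flat name
                    yield name + key, value

    print(sorted(name for name, _ in flatten(NAMESPACES)))
    # -> ['CELERYD_CONCURRENCY', 'SERVER_EMAIL']
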
+ 13 - 13
celery/app/log.py

@@ -41,8 +41,8 @@ class TaskFormatter(ColorFormatter):
             record.__dict__.update(task_id=task.request.id,
                                    task_name=task.name)
         else:
-            record.__dict__.setdefault("task_name", "???")
-            record.__dict__.setdefault("task_id", "???")
+            record.__dict__.setdefault('task_name', '???')
+            record.__dict__.setdefault('task_id', '???')
         return ColorFormatter.format(self, record)
 
 
@@ -60,17 +60,17 @@ class Logging(object):
         self.colorize = self.app.conf.CELERYD_LOG_COLOR
 
     def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
-            redirect_level="WARNING"):
+            redirect_level='WARNING'):
         handled = self.setup_logging_subsystem(loglevel, logfile)
         if not handled:
-            logger = get_logger("celery.redirected")
+            logger = get_logger('celery.redirected')
             if redirect_stdouts:
                 self.redirect_stdouts_to_logger(logger,
                                 loglevel=redirect_level)
         os.environ.update(
-            CELERY_LOG_LEVEL=str(loglevel) if loglevel else "",
-            CELERY_LOG_FILE=str(logfile) if logfile else "",
-            CELERY_LOG_REDIRECT="1" if redirect_stdouts else "",
+            CELERY_LOG_LEVEL=str(loglevel) if loglevel else '',
+            CELERY_LOG_FILE=str(logfile) if logfile else '',
+            CELERY_LOG_REDIRECT='1' if redirect_stdouts else '',
             CELERY_LOG_REDIRECT_LEVEL=str(redirect_level))
 
     def setup_logging_subsystem(self, loglevel=None, logfile=None,
@@ -107,7 +107,7 @@ class Logging(object):
 
         # This is a hack for multiprocessing's fork+exec, so that
         # logging before Process.run works.
-        logfile_name = logfile if isinstance(logfile, basestring) else ""
+        logfile_name = logfile if isinstance(logfile, basestring) else ''
         os.environ.update(_MP_FORK_LOGLEVEL_=str(loglevel),
                           _MP_FORK_LOGFILE_=logfile_name,
                           _MP_FORK_LOGFORMAT_=format)
@@ -127,7 +127,7 @@ class Logging(object):
         if colorize is None:
             colorize = self.supports_color(logfile)
 
-        logger = self.setup_handlers(get_logger("celery.task"),
+        logger = self.setup_handlers(get_logger('celery.task'),
                                      logfile, format, colorize,
                                      formatter=TaskFormatter, **kwargs)
         logger.setLevel(loglevel)
@@ -181,7 +181,7 @@ class Logging(object):
         """Create log handler with either a filename, an open stream
         or :const:`None` (stderr)."""
         logfile = sys.__stderr__ if logfile is None else logfile
-        if hasattr(logfile, "write"):
+        if hasattr(logfile, 'write'):
             return logging.StreamHandler(logfile)
         return WatchedFileHandler(logfile)
 
@@ -191,12 +191,12 @@ class Logging(object):
 
     def _is_configured(self, logger):
         return self._has_handler(logger) and not getattr(
-                logger, "_rudimentary_setup", False)
+                logger, '_rudimentary_setup', False)
 
-    def setup_logger(self, name="celery", *args, **kwargs):
+    def setup_logger(self, name='celery', *args, **kwargs):
         """Deprecated: No longer used."""
         self.setup_logging_subsystem(*args, **kwargs)
         return logging.root
 
-    def get_default_logger(self, name="celery", **kwargs):
+    def get_default_logger(self, name='celery', **kwargs):
         return get_logger(name)

+ 3 - 3
celery/app/registry.py

@@ -39,16 +39,16 @@ class TaskRegistry(dict):
 
         """
         try:
-            self.pop(getattr(name, "name", name))
+            self.pop(getattr(name, 'name', name))
         except KeyError:
             raise self.NotRegistered(name)
 
     # -- these methods are irrelevant now and will be removed in 3.0
     def regular(self):
-        return self.filter_types("regular")
+        return self.filter_types('regular')
 
     def periodic(self):
-        return self.filter_types("periodic")
+        return self.filter_types('periodic')
 
     def filter_types(self, type):
         return dict((name, task) for name, task in self.iteritems()

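Reviewer note: the getattr(name, 'name', name) trick above lets unregister() take either a task instance or a plain task name. A self-contained sketch of the same pattern:

    class NotRegistered(KeyError):
        pass

    class TaskRegistry(dict):
        NotRegistered = NotRegistered

        def register(self, task):
            self[task.name] = task

        def unregister(self, name):
            try:
                # accept a task instance (has .name) or a plain name string
                self.pop(getattr(name, 'name', name))
            except KeyError:
                raise self.NotRegistered(name)
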
+ 6 - 6
celery/app/routes.py

@@ -14,7 +14,7 @@ from celery.utils import lpmerge
 from celery.utils.functional import firstmethod, mpromise
 from celery.utils.imports import instantiate
 
-_first_route = firstmethod("route_for_task")
+_first_route = firstmethod('route_for_task')
 
 
 class MapRoute(object):
@@ -45,7 +45,7 @@ class Router(object):
             route = self.lookup_route(task, args, kwargs)
             if route:  # expands 'queue' in route.
                 return lpmerge(self.expand_destination(route), options)
-        if "queue" not in options:
+        if 'queue' not in options:
             options = lpmerge(self.expand_destination(
                               self.app.conf.CELERY_DEFAULT_QUEUE), options)
         return options
@@ -57,7 +57,7 @@ class Router(object):
         else:
             # can use defaults from configured queue, but override specific
             # things (like the routing_key): great for topic exchanges.
-            queue = route.pop("queue", None)
+            queue = route.pop('queue', None)
 
         if queue:  # expand config from configured queue.
             try:
@@ -65,13 +65,13 @@ class Router(object):
             except KeyError:
                 if not self.create_missing:
                     raise QueueNotFound(
-                        "Queue %r is not defined in CELERY_QUEUES" % queue)
-                for key in "exchange", "routing_key":
+                        'Queue %r is not defined in CELERY_QUEUES' % queue)
+                for key in 'exchange', 'routing_key':
                     if route.get(key) is None:
                         route[key] = queue
                 dest = self.app.amqp.queues.add(queue, **route).as_dict()
             # needs to be declared by publisher
-            dest["queue"] = queue
+            dest['queue'] = queue
             return lpmerge(dest, route)
         return route
 
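Reviewer note: an illustration of what expand_destination() does here, with made-up queue and task names. A route that only names a queue inherits the rest of the declaration from CELERY_QUEUES; with create_missing enabled, an unknown queue falls back to its own name for exchange and routing_key:

    CELERY_QUEUES = {
        'images': {'exchange': 'media',
                   'exchange_type': 'topic',
                   'routing_key': 'media.image'},
    }
    CELERY_ROUTES = {'tasks.compress_image': {'queue': 'images'}}

    # Router.route() finds {'queue': 'images'}, pops 'queue', merges in the
    # 'images' declaration, then re-adds 'queue': 'images' so the publisher
    # can declare it before sending.
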

+ 64 - 64
celery/app/task.py

@@ -32,16 +32,16 @@ from .registry import _unpickle_task
 
 #: extracts attributes related to publishing a message from an object.
 extract_exec_options = mattrgetter(
-    "queue", "routing_key", "exchange",
-    "immediate", "mandatory", "priority", "expires",
-    "serializer", "delivery_mode", "compression",
+    'queue', 'routing_key', 'exchange',
+    'immediate', 'mandatory', 'priority', 'expires',
+    'serializer', 'delivery_mode', 'compression',
 )
 
 #: Billiard sets this when execv is enabled.
 #: We use it to find out the name of the original ``__main__``
 #: module, so that we can properly rewrite the name of the
 #: task to be that of ``App.main``.
-MP_MAIN_FILE = os.environ.get("MP_MAIN_FILE") or None
+MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE') or None
 
 
 class Context(object):
@@ -79,7 +79,7 @@ class Context(object):
             return default
 
     def __repr__(self):
-        return "<Context: %r>" % (vars(self, ))
+        return '<Context: %r>' % (vars(self, ))
 
     @property
     def children(self):
@@ -102,27 +102,27 @@ class TaskType(type):
 
     def __new__(cls, name, bases, attrs):
         new = super(TaskType, cls).__new__
-        task_module = attrs.get("__module__") or "__main__"
+        task_module = attrs.get('__module__') or '__main__'
 
         # - Abstract class: abstract attribute should not be inherited.
-        if attrs.pop("abstract", None) or not attrs.get("autoregister", True):
+        if attrs.pop('abstract', None) or not attrs.get('autoregister', True):
             return new(cls, name, bases, attrs)
 
         # The 'app' attribute is now a property, with the real app located
         # in the '_app' attribute.  Previously this was a regular attribute,
         # so we should support classes defining it.
-        _app1, _app2 = attrs.pop("_app", None), attrs.pop("app", None)
-        app = attrs["_app"] = _app1 or _app2 or current_app
+        _app1, _app2 = attrs.pop('_app', None), attrs.pop('app', None)
+        app = attrs['_app'] = _app1 or _app2 or current_app
 
         # - Automatically generate missing/empty name.
         autoname = False
-        if not attrs.get("name"):
+        if not attrs.get('name'):
             try:
                 module_name = sys.modules[task_module].__name__
             except KeyError:  # pragma: no cover
                 # Fix for manage.py shell_plus (Issue #366).
                 module_name = task_module
-            attrs["name"] = '.'.join(filter(None, [module_name, name]))
+            attrs['name'] = '.'.join(filter(None, [module_name, name]))
             autoname = True
 
         # - Create and register class.
@@ -137,11 +137,11 @@ class TaskType(type):
         # - to match App.main.
         if MP_MAIN_FILE and sys.modules[task_module].__file__ == MP_MAIN_FILE:
             # - see comment about :envvar:`MP_MAIN_FILE` above.
-            task_module = "__main__"
-        if autoname and task_module == "__main__" and app.main:
-            attrs["name"] = '.'.join([app.main, name])
+            task_module = '__main__'
+        if autoname and task_module == '__main__' and app.main:
+            attrs['name'] = '.'.join([app.main, name])
 
-        task_name = attrs["name"]
+        task_name = attrs['name']
         if task_name not in tasks:
             tasks.register(new(cls, name, bases, attrs))
         instance = tasks[task_name]
@@ -150,8 +150,8 @@ class TaskType(type):
 
     def __repr__(cls):
         if cls._app:
-            return "<class %s of %s>" % (cls.__name__, cls._app, )
-        return "<unbound %s>" % (cls.__name__, )
+            return '<class %s of %s>' % (cls.__name__, cls._app, )
+        return '<unbound %s>' % (cls.__name__, )
 
 
 class Task(object):
@@ -169,7 +169,7 @@ class Task(object):
     MaxRetriesExceededError = MaxRetriesExceededError
 
     #: Execution strategy used, or the qualified name of one.
-    Strategy = "celery.worker.strategy:default"
+    Strategy = 'celery.worker.strategy:default'
 
     #: This is the instance bound to if the task is a method of a class.
     __self__ = None
@@ -196,8 +196,8 @@ class Task(object):
     default_retry_delay = 3 * 60
 
     #: Rate limit for this task type.  Examples: :const:`None` (no rate
-    #: limit), `"100/s"` (hundred tasks a second), `"100/m"` (hundred tasks
-    #: a minute),`"100/h"` (hundred tasks an hour)
+    #: limit), `'100/s'` (hundred tasks a second), `'100/m'` (hundred tasks
+    #: a minute), `'100/h'` (hundred tasks an hour)
     rate_limit = None
 
     #: If enabled the worker will not store task state and return values
@@ -214,7 +214,7 @@ class Task(object):
     send_error_emails = False
 
     #: The name of a serializer that is registered with
-    #: :mod:`kombu.serialization.registry`.  Default is `"pickle"`.
+    #: :mod:`kombu.serialization.registry`.  Default is `'pickle'`.
     serializer = None
 
     #: Hard time limit.
@@ -231,12 +231,12 @@ class Task(object):
     #: If disabled this task won't be registered automatically.
     autoregister = True
 
-    #: If enabled the task will report its status as "started" when the task
+    #: If enabled the task will report its status as 'started' when the task
     #: is executed by a worker.  Disabled by default as the normal behaviour
     #: is to not report that level of granularity.  Tasks are either pending,
     #: finished, or waiting to be retried.
     #:
-    #: Having a "started" status can be useful for when there are long
+    #: Having a 'started' status can be useful for when there are long
     #: running tasks and there is a need to report which task is currently
     #: running.
     #:
@@ -262,14 +262,14 @@ class Task(object):
     __bound__ = False
 
     from_config = (
-        ("send_error_emails", "CELERY_SEND_TASK_ERROR_EMAILS"),
-        ("serializer", "CELERY_TASK_SERIALIZER"),
-        ("rate_limit", "CELERY_DEFAULT_RATE_LIMIT"),
-        ("track_started", "CELERY_TRACK_STARTED"),
-        ("acks_late", "CELERY_ACKS_LATE"),
-        ("ignore_result", "CELERY_IGNORE_RESULT"),
-        ("store_errors_even_if_ignored",
-            "CELERY_STORE_ERRORS_EVEN_IF_IGNORED"),
+        ('send_error_emails', 'CELERY_SEND_TASK_ERROR_EMAILS'),
+        ('serializer', 'CELERY_TASK_SERIALIZER'),
+        ('rate_limit', 'CELERY_DEFAULT_RATE_LIMIT'),
+        ('track_started', 'CELERY_TRACK_STARTED'),
+        ('acks_late', 'CELERY_ACKS_LATE'),
+        ('ignore_result', 'CELERY_IGNORE_RESULT'),
+        ('store_errors_even_if_ignored',
+            'CELERY_STORE_ERRORS_EVEN_IF_IGNORED'),
     )
 
     __bound__ = False
@@ -332,7 +332,7 @@ class Task(object):
     @classmethod
     def add_around(self, attr, around):
         orig = getattr(self, attr)
-        if getattr(orig, "__wrapped__", None):
+        if getattr(orig, '__wrapped__', None):
             orig = orig.__wrapped__
         meth = around(orig)
         meth.__wrapped__ = orig
@@ -354,7 +354,7 @@ class Task(object):
 
     def run(self, *args, **kwargs):
         """The body of the task executed by workers."""
-        raise NotImplementedError("Tasks must define the run method.")
+        raise NotImplementedError('Tasks must define the run method.')
 
     def start_strategy(self, app, consumer):
         return instantiate(self.Strategy, self, app, consumer)
@@ -559,26 +559,26 @@ class Task(object):
         # so just raise the original exception.
         if request.called_directly:
             maybe_reraise()
-            raise exc or RetryTaskError("Task can be retried", None)
+            raise exc or RetryTaskError('Task can be retried', None)
 
         if delivery_info:
-            options.setdefault("exchange", delivery_info.get("exchange"))
-            options.setdefault("routing_key", delivery_info.get("routing_key"))
+            options.setdefault('exchange', delivery_info.get('exchange'))
+            options.setdefault('routing_key', delivery_info.get('routing_key'))
 
         if not eta and countdown is None:
             countdown = self.default_retry_delay
 
-        options.update({"retries": request.retries + 1,
-                        "task_id": request.id,
-                        "countdown": countdown,
-                        "eta": eta})
+        options.update({'retries': request.retries + 1,
+                        'task_id': request.id,
+                        'countdown': countdown,
+                        'eta': eta})
 
-        if max_retries is not None and options["retries"] > max_retries:
+        if max_retries is not None and options['retries'] > max_retries:
             if exc:
                 maybe_reraise()
             raise self.MaxRetriesExceededError(
-                    "Can't retry %s[%s] args:%s kwargs:%s" % (
-                        self.name, options["task_id"], args, kwargs))
+                    """Can't retry %s[%s] args:%s kwargs:%s""" % (
+                        self.name, options['task_id'], args, kwargs))
 
         # If task was executed eagerly using apply(),
         # then the retry must also be executed eagerly.
@@ -586,8 +586,8 @@ class Task(object):
             self.apply(args=args, kwargs=kwargs, **options).get()
         else:
             self.apply_async(args=args, kwargs=kwargs, **options)
-        ret = RetryTaskError(eta and "Retry at %s" % eta
-                                  or "Retry in %s secs." % countdown, exc)
+        ret = RetryTaskError(eta and 'Retry at %s' % eta
+                                  or 'Retry in %s secs.' % countdown, exc)
         if throw:
             raise ret
         return ret
@@ -610,28 +610,28 @@ class Task(object):
         app = self._get_app()
         args = args or []
         kwargs = kwargs or {}
-        task_id = options.get("task_id") or uuid()
-        retries = options.get("retries", 0)
-        throw = app.either("CELERY_EAGER_PROPAGATES_EXCEPTIONS",
-                           options.pop("throw", None))
+        task_id = options.get('task_id') or uuid()
+        retries = options.get('retries', 0)
+        throw = app.either('CELERY_EAGER_PROPAGATES_EXCEPTIONS',
+                           options.pop('throw', None))
 
         # Make sure we get the task instance, not class.
         task = app._tasks[self.name]
 
-        request = {"id": task_id,
-                   "retries": retries,
-                   "is_eager": True,
-                   "logfile": options.get("logfile"),
-                   "loglevel": options.get("loglevel", 0),
-                   "delivery_info": {"is_eager": True}}
+        request = {'id': task_id,
+                   'retries': retries,
+                   'is_eager': True,
+                   'logfile': options.get('logfile'),
+                   'loglevel': options.get('loglevel', 0),
+                   'delivery_info': {'is_eager': True}}
         if self.accept_magic_kwargs:
-            default_kwargs = {"task_name": task.name,
-                              "task_id": task_id,
-                              "task_retries": retries,
-                              "task_is_eager": True,
-                              "logfile": options.get("logfile"),
-                              "loglevel": options.get("loglevel", 0),
-                              "delivery_info": {"is_eager": True}}
+            default_kwargs = {'task_name': task.name,
+                              'task_id': task_id,
+                              'task_retries': retries,
+                              'task_is_eager': True,
+                              'logfile': options.get('logfile'),
+                              'loglevel': options.get('loglevel', 0),
+                              'delivery_info': {'is_eager': True}}
             supported_keys = fun_takes_kwargs(task.run, default_kwargs)
             extend_with = dict((key, val)
                                     for key, val in default_kwargs.items()
@@ -772,7 +772,7 @@ class Task(object):
 
     def send_error_email(self, context, exc, **kwargs):
         if self.send_error_emails and \
-                not getattr(self, "disable_error_emails", None):
+                not getattr(self, 'disable_error_emails', None):
             self.ErrorMail(self, **kwargs).send(context, exc)
 
     def execute(self, request, pool, loglevel, logfile, **kwargs):
@@ -796,7 +796,7 @@ class Task(object):
 
     def __repr__(self):
         """`repr(task)`"""
-        return "<@task: %s>" % (self.name, )
+        return '<@task: %s>' % (self.name, )
 
     def _get_logger(self, **kwargs):
         """Get task-aware logger object."""

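Reviewer note: a usage sketch for the retry() hunk above (download() is a hypothetical helper). With the default throw=True, retry() re-publishes the task with retries + 1 and raises RetryTaskError so the worker marks the current run as retried:

    from celery.task import task

    @task(max_retries=3, default_retry_delay=60)
    def fetch(url):
        try:
            return download(url)          # hypothetical helper
        except IOError, exc:
            raise fetch.retry(exc=exc)    # raises RetryTaskError internally
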
+ 25 - 25
celery/app/utils.py

@@ -18,7 +18,7 @@ from celery.utils.imports import qualname
 
 from .defaults import find
 
-SETTINGS_INFO = """%s %s"""
+SETTINGS_INFO = '%s %s'
 
 BUGREPORT_INFO = """
 software -> celery:%(celery_v)s kombu:%(kombu_v)s py:%(py_v)s
@@ -36,14 +36,14 @@ class Settings(datastructures.ConfigurationView):
     @property
     def CELERY_RESULT_BACKEND(self):
         """Resolves deprecated alias ``CELERY_BACKEND``."""
-        return self.first("CELERY_RESULT_BACKEND", "CELERY_BACKEND")
+        return self.first('CELERY_RESULT_BACKEND', 'CELERY_BACKEND')
 
     @property
     def BROKER_TRANSPORT(self):
         """Resolves compat aliases :setting:`BROKER_BACKEND`
         and :setting:`CARROT_BACKEND`."""
-        return self.first("BROKER_TRANSPORT",
-                          "BROKER_BACKEND", "CARROT_BACKEND")
+        return self.first('BROKER_TRANSPORT',
+                          'BROKER_BACKEND', 'CARROT_BACKEND')
 
     @property
     def BROKER_BACKEND(self):
@@ -52,24 +52,24 @@ class Settings(datastructures.ConfigurationView):
 
     @property
     def BROKER_HOST(self):
-        return (os.environ.get("CELERY_BROKER_URL") or
-                self.first("BROKER_URL", "BROKER_HOST"))
+        return (os.environ.get('CELERY_BROKER_URL') or
+                self.first('BROKER_URL', 'BROKER_HOST'))
 
     def without_defaults(self):
         # the last stash is the default settings, so just skip that
         return Settings({}, self._order[:-1])
 
-    def find_value_for_key(self, name, namespace="celery"):
+    def find_value_for_key(self, name, namespace='celery'):
         return self.get_by_parts(*self.find_option(name, namespace)[:-1])
 
-    def find_option(self, name, namespace="celery"):
+    def find_option(self, name, namespace='celery'):
         return find(name, namespace)
 
     def get_by_parts(self, *parts):
-        return self["_".join(filter(None, parts))]
+        return self['_'.join(filter(None, parts))]
 
     def humanize(self):
-        return "\n".join(SETTINGS_INFO % (key + ':', pretty(value, width=50))
+        return '\n'.join(SETTINGS_INFO % (key + ':', pretty(value, width=50))
                     for key, value in self.without_defaults().iteritems())
 
 
@@ -83,7 +83,7 @@ class AppPickler(object):
         return app
 
     def prepare(self, app, **kwargs):
-        app.conf.update(kwargs["changes"])
+        app.conf.update(kwargs['changes'])
 
     def build_kwargs(self, *args):
         return self.build_standard_kwargs(*args)
@@ -110,21 +110,21 @@ def bugreport(app):
 
     try:
         trans = app.broker_connection().transport
-        driver_v = "%s:%s" % (trans.driver_name, trans.driver_version())
+        driver_v = '%s:%s' % (trans.driver_name, trans.driver_version())
     except Exception:
-        driver_v = ""
+        driver_v = ''
 
     return BUGREPORT_INFO % {
-        "system": _platform.system(),
-        "arch": ', '.join(filter(None, _platform.architecture())),
-        "py_i": platforms.pyimplementation(),
-        "celery_v": celery.__version__,
-        "kombu_v": kombu.__version__,
-        "billiard_v": billiard.__version__,
-        "py_v": _platform.python_version(),
-        "driver_v": driver_v,
-        "transport": app.conf.BROKER_TRANSPORT or "amqp",
-        "results": app.conf.CELERY_RESULT_BACKEND or "disabled",
-        "human_settings": app.conf.humanize(),
-        "loader": qualname(app.loader.__class__),
+        'system': _platform.system(),
+        'arch': ', '.join(filter(None, _platform.architecture())),
+        'py_i': platforms.pyimplementation(),
+        'celery_v': celery.__version__,
+        'kombu_v': kombu.__version__,
+        'billiard_v': billiard.__version__,
+        'py_v': _platform.python_version(),
+        'driver_v': driver_v,
+        'transport': app.conf.BROKER_TRANSPORT or 'amqp',
+        'results': app.conf.CELERY_RESULT_BACKEND or 'disabled',
+        'human_settings': app.conf.humanize(),
+        'loader': qualname(app.loader.__class__),
     }

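Reviewer note: the compat properties above lean on first(), which (roughly) returns the value of the first key that is actually set. A simplified stand-in showing how the deprecated alias keeps working:

    class Settings(dict):                 # stand-in for ConfigurationView
        def first(self, *keys):
            for key in keys:
                value = self.get(key)
                if value is not None:
                    return value

    conf = Settings(CELERY_BACKEND='redis://localhost')
    # the deprecated name still answers for the new one:
    assert conf.first('CELERY_RESULT_BACKEND',
                      'CELERY_BACKEND') == 'redis://localhost'
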
+ 24 - 24
celery/apps/beat.py

@@ -3,7 +3,7 @@
     celery.apps.beat
     ~~~~~~~~~~~~~~~~
 
-    This module is the "program-version" of :mod:`celery.beat`.
+    This module is the 'program-version' of :mod:`celery.beat`.
 
     It does everything necessary to run that module
     as an actual application, like installing signal handlers
@@ -32,17 +32,17 @@ Configuration ->
     . maxinterval -> %(hmax_interval)s (%(max_interval)ss)
 """.strip()
 
-logger = get_logger("celery.beat")
+logger = get_logger('celery.beat')
 
 
 class Beat(configurated):
     Service = beat.Service
 
     app = None
-    loglevel = from_config("log_level")
-    logfile = from_config("log_file")
-    schedule = from_config("schedule_filename")
-    scheduler_cls = from_config("scheduler")
+    loglevel = from_config('log_level')
+    logfile = from_config('log_file')
+    schedule = from_config('schedule_filename')
+    scheduler_cls = from_config('scheduler')
     redirect_stdouts = from_config()
     redirect_stdouts_level = from_config()
 
@@ -50,7 +50,7 @@ class Beat(configurated):
             socket_timeout=30, pidfile=None, **kwargs):
         """Starts the celerybeat task scheduler."""
         self.app = app = app_or_default(app or self.app)
-        self.setup_defaults(kwargs, namespace="celerybeat")
+        self.setup_defaults(kwargs, namespace='celerybeat')
 
         self.max_interval = max_interval
         self.socket_timeout = socket_timeout
@@ -63,7 +63,7 @@ class Beat(configurated):
     def run(self):
         self.setup_logging()
         print(str(self.colored.cyan(
-                    "celerybeat v%s is starting." % __version__)))
+                    'celerybeat v%s is starting.' % __version__)))
         self.init_loader()
         self.set_process_title()
         self.start_scheduler()
@@ -84,19 +84,19 @@ class Beat(configurated):
                             scheduler_cls=self.scheduler_cls,
                             schedule_filename=self.schedule)
 
-        print(str(c.blue("__    ", c.magenta("-"),
-                  c.blue("    ... __   "), c.magenta("-"),
-                  c.blue("        _\n"),
+        print(str(c.blue('__    ', c.magenta('-'),
+                  c.blue('    ... __   '), c.magenta('-'),
+                  c.blue('        _\n'),
                   c.reset(self.startup_info(beat)))))
         if self.socket_timeout:
-            logger.debug("Setting default socket timeout to %r",
+            logger.debug('Setting default socket timeout to %r',
                          self.socket_timeout)
             socket.setdefaulttimeout(self.socket_timeout)
         try:
             self.install_sync_handler(beat)
             beat.start()
         except Exception, exc:
-            logger.critical("celerybeat raised exception %s: %r",
+            logger.critical('celerybeat raised exception %s: %r',
                             exc.__class__, exc,
                             exc_info=True)
 
@@ -109,20 +109,20 @@ class Beat(configurated):
     def startup_info(self, beat):
         scheduler = beat.get_scheduler(lazy=True)
         return STARTUP_INFO_FMT % {
-            "conninfo": self.app.broker_connection().as_uri(),
-            "logfile": self.logfile or "[stderr]",
-            "loglevel": LOG_LEVELS[self.loglevel],
-            "loader": qualname(self.app.loader),
-            "scheduler": qualname(scheduler),
-            "scheduler_info": scheduler.info,
-            "hmax_interval": humanize_seconds(beat.max_interval),
-            "max_interval": beat.max_interval,
+            'conninfo': self.app.broker_connection().as_uri(),
+            'logfile': self.logfile or '[stderr]',
+            'loglevel': LOG_LEVELS[self.loglevel],
+            'loader': qualname(self.app.loader),
+            'scheduler': qualname(scheduler),
+            'scheduler_info': scheduler.info,
+            'hmax_interval': humanize_seconds(beat.max_interval),
+            'max_interval': beat.max_interval,
         }
 
     def set_process_title(self):
-        arg_start = "manage" in sys.argv[0] and 2 or 1
-        platforms.set_process_title("celerybeat",
-                               info=" ".join(sys.argv[arg_start:]))
+        arg_start = 'manage' in sys.argv[0] and 2 or 1
+        platforms.set_process_title('celerybeat',
+                               info=' '.join(sys.argv[arg_start:]))
 
     def install_sync_handler(self, beat):
         """Install a `SIGTERM` + `SIGINT` handler that saves

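Reviewer note: the from_config() attributes above resolve against the namespaced app settings (CELERYBEAT_LOG_LEVEL and friends) when no explicit option is passed; setup_defaults() fills them in. A simplified stand-in of the idea, not Celery's implementation:

    class from_config(object):
        def __init__(self, key=None):
            self.key = key                # None: derive from the attribute name

    def setup_defaults(obj, kwargs, conf, namespace):
        for attr in dir(type(obj)):
            descr = getattr(type(obj), attr, None)
            if isinstance(descr, from_config):
                setting = ('%s_%s' % (namespace, descr.key or attr)).upper()
                setattr(obj, attr, kwargs.get(attr, conf.get(setting)))
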
+ 69 - 69
celery/apps/worker.py

@@ -3,7 +3,7 @@
     celery.apps.worker
     ~~~~~~~~~~~~~~~~~~
 
-    This module is the "program-version" of :mod:`celery.worker`.
+    This module is the 'program-version' of :mod:`celery.worker`.
 
     It does everything necessary to run that module
     as an actual application, like installing signal handlers,
@@ -46,25 +46,25 @@ def active_thread_count():
     from threading import enumerate
     # must use .getName on Python 2.5
     return sum(1 for t in enumerate()
-        if not t.getName().startswith("Dummy-"))
+        if not t.getName().startswith('Dummy-'))
 
 
 def safe_say(msg):
-    sys.__stderr__.write("\n%s\n" % msg)
+    sys.__stderr__.write('\n%s\n' % msg)
 
 ARTLINES = [
-    " --------------",
-    "---- **** -----",
-    "--- * ***  * --",
-    "-- * - **** ---",
-    "- ** ----------",
-    "- ** ----------",
-    "- ** ----------",
-    "- ** ----------",
-    "- *** --- * ---",
-    "-- ******* ----",
-    "--- ***** -----",
-    " --------------",
+    ' --------------',
+    '---- **** -----',
+    '--- * ***  * --',
+    '-- * - **** ---',
+    '- ** ----------',
+    '- ** ----------',
+    '- ** ----------',
+    '- ** ----------',
+    '- *** --- * ---',
+    '-- ******* ----',
+    '--- ***** -----',
+    ' --------------',
 ]
 
 BANNER = """\
@@ -99,7 +99,7 @@ class Worker(configurated):
 
     app = None
     inherit_confopts = (WorkController, )
-    loglevel = from_config("log_level")
+    loglevel = from_config('log_level')
     redirect_stdouts = from_config()
     redirect_stdouts_level = from_config()
 
@@ -114,7 +114,7 @@ class Worker(configurated):
         signals.celeryd_init.send(sender=self.hostname, instance=self,
                                   conf=self.app.conf)
 
-        self.setup_defaults(kwargs, namespace="celeryd")
+        self.setup_defaults(kwargs, namespace='celeryd')
         if not self.concurrency:
             try:
                 self.concurrency = cpu_count()
@@ -130,17 +130,17 @@ class Worker(configurated):
         self.autoreload = autoreload
         self.no_execv = no_execv
         if autoscale:
-            max_c, _, min_c = autoscale.partition(",")
+            max_c, _, min_c = autoscale.partition(',')
             self.autoscale = [int(max_c), min_c and int(min_c) or 0]
         self._isatty = isatty(sys.stdout)
 
         self.colored = app.log.colored(self.logfile)
 
         if isinstance(self.use_queues, basestring):
-            self.use_queues = self.use_queues.split(",")
+            self.use_queues = self.use_queues.split(',')
         if self.include:
             if isinstance(self.include, basestring):
-                self.include = self.include.split(",")
+                self.include = self.include.split(',')
             app.conf.CELERY_INCLUDE = (
                 tuple(app.conf.CELERY_INCLUDE) + tuple(self.include))
         self.loglevel = mlevel(self.loglevel)
@@ -149,18 +149,18 @@ class Worker(configurated):
         self.init_queues()
         self.app.loader.init_worker()
 
-        if getattr(os, "getuid", None) and os.getuid() == 0:
+        if getattr(os, 'getuid', None) and os.getuid() == 0:
             warnings.warn(RuntimeWarning(
-                "Running celeryd with superuser privileges is discouraged!"))
+                'Running celeryd with superuser privileges is discouraged!'))
 
         if self.purge:
             self.purge_messages()
 
         # Dump configuration to screen so we have some basic information
         # for when users sends bug reports.
-        print(str(self.colored.cyan(" \n", self.startup_info())) +
-              str(self.colored.reset(self.extra_info() or "")))
-        self.set_process_status("-active-")
+        print(str(self.colored.cyan(' \n', self.startup_info())) +
+              str(self.colored.reset(self.extra_info() or '')))
+        self.set_process_status('-active-')
 
         self.redirect_stdouts_to_logger()
         try:
@@ -170,7 +170,7 @@ class Worker(configurated):
 
     def on_consumer_ready(self, consumer):
         signals.worker_ready.send(sender=consumer)
-        print("celery@%s has started." % self.hostname)
+        print('celery@%s has started.' % self.hostname)
 
     def init_queues(self):
         try:
@@ -184,48 +184,48 @@ class Worker(configurated):
 
     def purge_messages(self):
         count = self.app.control.purge()
-        print("purge: Erased %d %s from the queue.\n" % (
-                count, pluralize(count, "message")))
+        print('purge: Erased %d %s from the queue.\n' % (
+                count, pluralize(count, 'message')))
 
     def tasklist(self, include_builtins=True):
         tasks = self.app.tasks.keys()
         if not include_builtins:
-            tasks = filter(lambda s: not s.startswith("celery."), tasks)
-        return "\n".join("  . %s" % task for task in sorted(tasks))
+            tasks = filter(lambda s: not s.startswith('celery.'), tasks)
+        return '\n'.join('  . %s' % task for task in sorted(tasks))
 
     def extra_info(self):
         if self.loglevel <= logging.INFO:
             include_builtins = self.loglevel <= logging.DEBUG
             tasklist = self.tasklist(include_builtins=include_builtins)
-            return EXTRA_INFO_FMT % {"tasks": tasklist}
+            return EXTRA_INFO_FMT % {'tasks': tasklist}
 
     def startup_info(self):
         app = self.app
         concurrency = unicode(self.concurrency)
-        appr = "%s:0x%x" % (app.main or "__main__", id(app))
+        appr = '%s:0x%x' % (app.main or '__main__', id(app))
         if not isinstance(app.loader, AppLoader):
             loader = qualname(app.loader)
-            if loader.startswith("celery.loaders"):
+            if loader.startswith('celery.loaders'):
                 loader = loader[14:]
-            appr += " (%s)" % loader
+            appr += ' (%s)' % loader
         if self.autoscale:
-            concurrency = "{min=%s, max=%s}" % tuple(self.autoscale)
+            concurrency = '{min=%s, max=%s}' % tuple(self.autoscale)
         pool = self.pool_cls
         if not isinstance(pool, basestring):
             pool = pool.__module__
-        concurrency += " (%s)" % pool.split('.')[-1]
-        events = "ON"
+        concurrency += ' (%s)' % pool.split('.')[-1]
+        events = 'ON'
         if not self.send_events:
-            events = "OFF (enable -E to monitor this worker)"
+            events = 'OFF (enable -E to monitor this worker)'
 
         banner = (BANNER % {
-            "app": appr,
-            "hostname": self.hostname,
-            "version": __version__,
-            "conninfo": self.app.broker_connection().as_uri(),
-            "concurrency": concurrency,
-            "events": events,
-            "queues": app.amqp.queues.format(indent=0, indent_first=False),
+            'app': appr,
+            'hostname': self.hostname,
+            'version': __version__,
+            'conninfo': self.app.broker_connection().as_uri(),
+            'concurrency': concurrency,
+            'events': events,
+            'queues': app.amqp.queues.format(indent=0, indent_first=False),
         }).splitlines()
 
         # integrate the ASCII art.
@@ -272,56 +272,56 @@ class Worker(configurated):
 
     def osx_proxy_detection_workaround(self):
         """See http://github.com/celery/celery/issues#issue/161"""
-        os.environ.setdefault("celery_dummy_proxy", "set_by_celeryd")
+        os.environ.setdefault('celery_dummy_proxy', 'set_by_celeryd')
 
     def set_process_status(self, info):
-        return platforms.set_mp_process_title("celeryd",
-                info="%s (%s)" % (info, platforms.strargv(sys.argv)),
+        return platforms.set_mp_process_title('celeryd',
+                info='%s (%s)' % (info, platforms.strargv(sys.argv)),
                 hostname=self.hostname)
 
 
-def _shutdown_handler(worker, sig="TERM", how="Warm", exc=SystemExit,
+def _shutdown_handler(worker, sig='TERM', how='Warm', exc=SystemExit,
         callback=None):
 
     def _handle_request(signum, frame):
         set_in_sighandler(True)
         try:
             from celery.worker import state
-            if current_process()._name == "MainProcess":
+            if current_process()._name == 'MainProcess':
                 if callback:
                     callback(worker)
-                safe_say("celeryd: %s shutdown (MainProcess)" % how)
+                safe_say('celeryd: %s shutdown (MainProcess)' % how)
             if active_thread_count() > 1:
-                setattr(state, {"Warm": "should_stop",
-                                "Cold": "should_terminate"}[how], True)
+                setattr(state, {'Warm': 'should_stop',
+                                'Cold': 'should_terminate'}[how], True)
             else:
                 raise exc()
         finally:
             set_in_sighandler(False)
-    _handle_request.__name__ = "worker_" + how
+    _handle_request.__name__ = 'worker_' + how
     platforms.signals[sig] = _handle_request
 install_worker_term_handler = partial(
-    _shutdown_handler, sig="SIGTERM", how="Warm", exc=SystemExit,
+    _shutdown_handler, sig='SIGTERM', how='Warm', exc=SystemExit,
 )
 install_worker_term_hard_handler = partial(
-    _shutdown_handler, sig="SIGQUIT", how="Cold", exc=SystemTerminate,
+    _shutdown_handler, sig='SIGQUIT', how='Cold', exc=SystemTerminate,
 )
 
 
 def on_SIGINT(worker):
-    safe_say("celeryd: Hitting Ctrl+C again will terminate all running tasks!")
-    install_worker_term_hard_handler(worker, sig="SIGINT")
+    safe_say('celeryd: Hitting Ctrl+C again will terminate all running tasks!')
+    install_worker_term_hard_handler(worker, sig='SIGINT')
 install_worker_int_handler = partial(
-    _shutdown_handler, sig="SIGINT", callback=on_SIGINT
+    _shutdown_handler, sig='SIGINT', callback=on_SIGINT
 )
 
 
-def install_worker_restart_handler(worker, sig="SIGHUP"):
+def install_worker_restart_handler(worker, sig='SIGHUP'):
 
     def restart_worker_sig_handler(signum, frame):
         """Signal handler restarting the current python program."""
         set_in_sighandler(True)
-        safe_say("Restarting celeryd (%s)" % (" ".join(sys.argv), ))
+        safe_say('Restarting celeryd (%s)' % (' '.join(sys.argv), ))
         pid = os.fork()
         if pid == 0:
             os.execv(sys.executable, [sys.executable] + sys.argv)
@@ -332,8 +332,8 @@ def install_worker_restart_handler(worker, sig="SIGHUP"):
 
 def install_cry_handler():
     # Jython/PyPy does not have sys._current_frames
-    is_jython = sys.platform.startswith("java")
-    is_pypy = hasattr(sys, "pypy_version_info")
+    is_jython = sys.platform.startswith('java')
+    is_pypy = hasattr(sys, 'pypy_version_info')
     if is_jython or is_pypy:  # pragma: no cover
         return
 
@@ -344,11 +344,11 @@ def install_cry_handler():
             safe_say(cry())
         finally:
             set_in_sighandler(False)
-    platforms.signals["SIGUSR1"] = cry_handler
+    platforms.signals['SIGUSR1'] = cry_handler
 
 
-def install_rdb_handler(envvar="CELERY_RDBSIG",
-                        sig="SIGUSR2"):  # pragma: no cover
+def install_rdb_handler(envvar='CELERY_RDBSIG',
+                        sig='SIGUSR2'):  # pragma: no cover
 
     def rdb_handler(signum, frame):
         """Signal handler setting a rdb breakpoint at the current frame."""
@@ -362,13 +362,13 @@ def install_rdb_handler(envvar="CELERY_RDBSIG",
         platforms.signals[sig] = rdb_handler
 
 
-def install_HUP_not_supported_handler(worker, sig="SIGHUP"):
+def install_HUP_not_supported_handler(worker, sig='SIGHUP'):
 
     def warn_on_HUP_handler(signum, frame):
         set_in_sighandler(True)
         try:
-            safe_say("%(sig)s not supported: Restarting with %(sig)s is "
-                     "unstable on this platform!" % {"sig": sig})
+            safe_say('%(sig)s not supported: Restarting with %(sig)s is '
+                     'unstable on this platform!' % {'sig': sig})
         finally:
             set_in_sighandler(False)
     platforms.signals[sig] = warn_on_HUP_handler

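Reviewer note: platforms.signals, used throughout this hunk, is a mapping where assignment installs a handler; on POSIX it boils down to the stdlib call below (SIGUSR1 does not exist on Windows):

    import signal

    def cry_handler(signum, frame):
        # the real handler dumps per-thread stack traces via cry()
        print('got SIGUSR1, would dump thread stacks here')

    # roughly what platforms.signals['SIGUSR1'] = cry_handler does:
    signal.signal(signal.SIGUSR1, cry_handler)
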
+ 8 - 8
celery/backends/__init__.py

@@ -22,13 +22,13 @@ Unknown result backend: %r.  Did you spell that correctly? (%r)\
 """
 
 BACKEND_ALIASES = {
-    "amqp": "celery.backends.amqp:AMQPBackend",
-    "cache": "celery.backends.cache:CacheBackend",
-    "redis": "celery.backends.redis:RedisBackend",
-    "mongodb": "celery.backends.mongodb:MongoBackend",
-    "database": "celery.backends.database:DatabaseBackend",
-    "cassandra": "celery.backends.cassandra:CassandraBackend",
-    "disabled": "celery.backends.base:DisabledBackend",
+    'amqp': 'celery.backends.amqp:AMQPBackend',
+    'cache': 'celery.backends.cache:CacheBackend',
+    'redis': 'celery.backends.redis:RedisBackend',
+    'mongodb': 'celery.backends.mongodb:MongoBackend',
+    'database': 'celery.backends.database:DatabaseBackend',
+    'cassandra': 'celery.backends.cassandra:CassandraBackend',
+    'disabled': 'celery.backends.base:DisabledBackend',
 }
 
 #: deprecated alias to ``current_app.backend``.
@@ -38,7 +38,7 @@ default_backend = Proxy(lambda: current_app.backend)
 @memoize(100)
 def get_backend_cls(backend=None, loader=None):
     """Get backend class by name/alias"""
-    backend = backend or "disabled"
+    backend = backend or 'disabled'
     loader = loader or current_app.loader
     aliases = dict(BACKEND_ALIASES, **loader.override_backends)
     try:

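Reviewer note: get_backend_cls() resolves these aliases to 'module:Class' paths and imports them, after merging loader.override_backends as shown above. A bare importer sketching the lookup (the real code presumably goes through kombu's symbol resolution):

    import importlib

    def resolve_backend(name, aliases):
        path = aliases.get(name, name)    # alias -> 'module:Class', else pass through
        module, _, attr = path.partition(':')
        return getattr(importlib.import_module(module), attr)

    # resolve_backend('redis', BACKEND_ALIASES)
    # -> <class 'celery.backends.redis.RedisBackend'>
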
+ 31 - 31
celery/backends/amqp.py

@@ -35,7 +35,7 @@ def repair_uuid(s):
     # Historically the dashes in UUIDs are removed from AMQ entity names,
     # but there is no known reason to do so.  Hopefully we'll be able to fix
     # this in v3.0.
-    return "%s-%s-%s-%s-%s" % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
+    return '%s-%s-%s-%s-%s' % (s[:8], s[8:12], s[12:16], s[16:20], s[20:])
 
 
 class AMQPBackend(BaseDictBackend):
@@ -50,10 +50,10 @@ class AMQPBackend(BaseDictBackend):
     supports_native_join = True
 
     retry_policy = {
-            "max_retries": 20,
-            "interval_start": 0,
-            "interval_step": 1,
-            "interval_max": 1,
+            'max_retries': 20,
+            'interval_start': 0,
+            'interval_step': 1,
+            'interval_max': 1,
     }
 
     def __init__(self, connection=None, exchange=None, exchange_type=None,
@@ -65,7 +65,7 @@ class AMQPBackend(BaseDictBackend):
         self.queue_arguments = {}
         self.persistent = (conf.CELERY_RESULT_PERSISTENT if persistent is None
                                                          else persistent)
-        delivery_mode = persistent and "persistent" or "transient"
+        delivery_mode = persistent and 'persistent' or 'transient'
         exchange = exchange or conf.CELERY_RESULT_EXCHANGE
         exchange_type = exchange_type or conf.CELERY_RESULT_EXCHANGE_TYPE
         self.exchange = self.Exchange(name=exchange,
@@ -81,18 +81,18 @@ class AMQPBackend(BaseDictBackend):
         dexpires = conf.CELERY_AMQP_TASK_RESULT_EXPIRES
 
         self.expires = None
-        if "expires" in kwargs:
-            if kwargs["expires"] is not None:
-                self.expires = self.prepare_expires(kwargs["expires"])
+        if 'expires' in kwargs:
+            if kwargs['expires'] is not None:
+                self.expires = self.prepare_expires(kwargs['expires'])
         else:
             self.expires = self.prepare_expires(dexpires)
 
         if self.expires:
-            self.queue_arguments["x-expires"] = int(self.expires * 1000)
+            self.queue_arguments['x-expires'] = int(self.expires * 1000)
         self.mutex = threading.Lock()
 
     def _create_binding(self, task_id):
-        name = task_id.replace("-", "")
+        name = task_id.replace('-', '')
         return self.Queue(name=name,
                           exchange=self.exchange,
                           routing_key=name,
@@ -107,12 +107,12 @@ class AMQPBackend(BaseDictBackend):
         """Send task return value and status."""
         with self.mutex:
             with self.app.amqp.producer_pool.acquire(block=True) as pub:
-                pub.publish({"task_id": task_id, "status": status,
-                             "result": self.encode_result(result, status),
-                             "traceback": traceback,
-                             "children": self.current_task_children()},
+                pub.publish({'task_id': task_id, 'status': status,
+                             'result': self.encode_result(result, status),
+                             'traceback': traceback,
+                             'children': self.current_task_children()},
                             exchange=self.exchange,
-                            routing_key=task_id.replace("-", ""),
+                            routing_key=task_id.replace('-', ''),
                             serializer=self.serializer,
                             retry=True, retry_policy=self.retry_policy,
                             declare=[self._create_binding(task_id)])
@@ -122,21 +122,21 @@ class AMQPBackend(BaseDictBackend):
             **kwargs):
         cached_meta = self._cache.get(task_id)
         if cache and cached_meta and \
-                cached_meta["status"] in states.READY_STATES:
+                cached_meta['status'] in states.READY_STATES:
             meta = cached_meta
         else:
             try:
                 meta = self.consume(task_id, timeout=timeout)
             except socket.timeout:
-                raise TimeoutError("The operation timed out.")
+                raise TimeoutError('The operation timed out.')
 
-        state = meta["status"]
+        state = meta['status']
         if state == states.SUCCESS:
-            return meta["result"]
+            return meta['result']
         elif state in states.PROPAGATE_STATES:
             if propagate:
-                raise self.exception_to_python(meta["result"])
-            return meta["result"]
+                raise self.exception_to_python(meta['result'])
+            return meta['result']
         else:
             return self.wait_for(task_id, timeout, cache)
 
@@ -163,7 +163,7 @@ class AMQPBackend(BaseDictBackend):
                     return self._cache[task_id]
                 except KeyError:
                     # result probably pending.
-                    return {"status": states.PENDING, "result": None}
+                    return {'status': states.PENDING, 'result': None}
     poll = get_task_meta  # XXX compat
 
     def drain_events(self, connection, consumer, timeout=None, now=time.time):
@@ -171,8 +171,8 @@ class AMQPBackend(BaseDictBackend):
         results = {}
 
         def callback(meta, message):
-            if meta["status"] in states.READY_STATES:
-                uuid = repair_uuid(message.delivery_info["routing_key"])
+            if meta['status'] in states.READY_STATES:
+                uuid = repair_uuid(message.delivery_info['routing_key'])
                 results[uuid] = meta
 
         consumer.callbacks[:] = [callback]
@@ -204,7 +204,7 @@ class AMQPBackend(BaseDictBackend):
                 except KeyError:
                     pass
                 else:
-                    if cached["status"] in states.READY_STATES:
+                    if cached['status'] in states.READY_STATES:
                         yield task_id, cached
                         cached_ids.add(task_id)
             ids ^= cached_ids
@@ -219,24 +219,24 @@ class AMQPBackend(BaseDictBackend):
 
     def reload_task_result(self, task_id):
         raise NotImplementedError(
-                "reload_task_result is not supported by this backend.")
+                'reload_task_result is not supported by this backend.')
 
     def reload_group_result(self, task_id):
         """Reload group result, even if it has been previously fetched."""
         raise NotImplementedError(
-                "reload_group_result is not supported by this backend.")
+                'reload_group_result is not supported by this backend.')
 
     def save_group(self, group_id, result):
         raise NotImplementedError(
-                "save_group is not supported by this backend.")
+                'save_group is not supported by this backend.')
 
     def restore_group(self, group_id, cache=True):
         raise NotImplementedError(
-                "restore_group is not supported by this backend.")
+                'restore_group is not supported by this backend.')
 
     def delete_group(self, group_id):
         raise NotImplementedError(
-                "delete_group is not supported by this backend.")
+                'delete_group is not supported by this backend.')
 
     def __reduce__(self, args=(), kwargs={}):
         kwargs.update(connection=self._connection,

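Reviewer note: a worked example of the task_id <-> routing-key round trip in this backend. _create_binding() strips the dashes from the UUID for the queue/routing-key name, and repair_uuid() reinserts them on the consuming side:

    tid = '8c9aa2fb-17b2-4d9c-9fd6-c7d3a29a4cd2'
    key = tid.replace('-', '')            # routing key / queue name
    restored = '%s-%s-%s-%s-%s' % (key[:8], key[8:12], key[12:16],
                                   key[16:20], key[20:])
    assert restored == tid
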
+ 49 - 49
celery/backends/base.py

@@ -35,7 +35,7 @@ from celery.utils.serialization import (
         create_exception_cls,
 )
 
-EXCEPTION_ABLE_CODECS = frozenset(["pickle", "yaml"])
+EXCEPTION_ABLE_CODECS = frozenset(['pickle', 'yaml'])
 is_py3k = sys.version_info >= (3, 0)
 
 
@@ -62,8 +62,8 @@ class BaseBackend(object):
 
     def __init__(self, *args, **kwargs):
         from celery.app import app_or_default
-        self.app = app_or_default(kwargs.get("app"))
-        self.serializer = kwargs.get("serializer",
+        self.app = app_or_default(kwargs.get('app'))
+        self.serializer = kwargs.get('serializer',
                                      self.app.conf.CELERY_RESULT_SERIALIZER)
         (self.content_type,
          self.content_encoding,
@@ -97,7 +97,7 @@ class BaseBackend(object):
     def store_result(self, task_id, result, status, traceback=None):
         """Store the result and status of a task."""
         raise NotImplementedError(
-                "store_result is not supported by this backend.")
+                'store_result is not supported by this backend.')
 
     def mark_as_started(self, task_id, **meta):
         """Mark a task as started"""
@@ -126,21 +126,21 @@ class BaseBackend(object):
         """Prepare exception for serialization."""
         if self.serializer in EXCEPTION_ABLE_CODECS:
             return get_pickleable_exception(exc)
-        return {"exc_type": type(exc).__name__, "exc_message": str(exc)}
+        return {'exc_type': type(exc).__name__, 'exc_message': str(exc)}
 
     def exception_to_python(self, exc):
         """Convert serialized exception to Python exception."""
         if self.serializer in EXCEPTION_ABLE_CODECS:
             return get_pickled_exception(exc)
-        return create_exception_cls(from_utf8(exc["exc_type"]),
-                                    sys.modules[__name__])(exc["exc_message"])
+        return create_exception_cls(from_utf8(exc['exc_type']),
+                                    sys.modules[__name__])(exc['exc_message'])
 
     def prepare_value(self, result):
         """Prepare value for storage."""
         return result
 
     def forget(self, task_id):
-        raise NotImplementedError("%s does not implement forget." % (
+        raise NotImplementedError('%s does not implement forget.' % (
                     self.__class__))
 
     def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
@@ -170,7 +170,7 @@ class BaseBackend(object):
             time.sleep(interval)
             time_elapsed += interval
             if timeout and time_elapsed >= timeout:
-                raise TimeoutError("The operation timed out.")
+                raise TimeoutError('The operation timed out.')
 
     def cleanup(self):
         """Backend cleanup. Is run by
@@ -184,52 +184,52 @@ class BaseBackend(object):
     def get_status(self, task_id):
         """Get the status of a task."""
         raise NotImplementedError(
-                "get_status is not supported by this backend.")
+                'get_status is not supported by this backend.')
 
     def get_result(self, task_id):
         """Get the result of a task."""
         raise NotImplementedError(
-                "get_result is not supported by this backend.")
+                'get_result is not supported by this backend.')
 
     def get_children(self, task_id):
         raise NotImplementedError(
-                "get_children is not supported by this backend.")
+                'get_children is not supported by this backend.')
 
     def get_traceback(self, task_id):
         """Get the traceback for a failed task."""
         raise NotImplementedError(
-                "get_traceback is not supported by this backend.")
+                'get_traceback is not supported by this backend.')
 
     def save_group(self, group_id, result):
         """Store the result and status of a task."""
         raise NotImplementedError(
-                "save_group is not supported by this backend.")
+                'save_group is not supported by this backend.')
 
     def restore_group(self, group_id, cache=True):
         """Get the result of a group."""
         raise NotImplementedError(
-                "restore_group is not supported by this backend.")
+                'restore_group is not supported by this backend.')
 
     def delete_group(self, group_id):
         raise NotImplementedError(
-                "delete_group is not supported by this backend.")
+                'delete_group is not supported by this backend.')
 
     def reload_task_result(self, task_id):
         """Reload task result, even if it has been previously fetched."""
         raise NotImplementedError(
-                "reload_task_result is not supported by this backend.")
+                'reload_task_result is not supported by this backend.')
 
     def reload_group_result(self, task_id):
         """Reload group result, even if it has been previously fetched."""
         raise NotImplementedError(
-                "reload_group_result is not supported by this backend.")
+                'reload_group_result is not supported by this backend.')
 
     def on_chord_part_return(self, task, propagate=False):
         pass
 
     def fallback_chord_unlock(self, group_id, body, result=None, **kwargs):
-        kwargs["result"] = [r.id for r in result]
-        self.app.tasks["celery.chord_unlock"].apply_async((group_id, body, ),
+        kwargs['result'] = [r.id for r in result]
+        self.app.tasks['celery.chord_unlock'].apply_async((group_id, body, ),
                                                           kwargs, countdown=1)
     on_chord_apply = fallback_chord_unlock
 
@@ -246,7 +246,7 @@ class BaseDictBackend(BaseBackend):
 
     def __init__(self, *args, **kwargs):
         super(BaseDictBackend, self).__init__(*args, **kwargs)
-        self._cache = LRUCache(limit=kwargs.get("max_cached_results") or
+        self._cache = LRUCache(limit=kwargs.get('max_cached_results') or
                                  self.app.conf.CELERY_MAX_CACHED_RESULTS)
 
     def store_result(self, task_id, result, status, traceback=None, **kwargs):
@@ -259,29 +259,29 @@ class BaseDictBackend(BaseBackend):
         self._forget(task_id)
 
     def _forget(self, task_id):
-        raise NotImplementedError("%s does not implement forget." % (
+        raise NotImplementedError('%s does not implement forget.' % (
                     self.__class__))
 
     def get_status(self, task_id):
         """Get the status of a task."""
-        return self.get_task_meta(task_id)["status"]
+        return self.get_task_meta(task_id)['status']
 
     def get_traceback(self, task_id):
         """Get the traceback for a failed task."""
-        return self.get_task_meta(task_id).get("traceback")
+        return self.get_task_meta(task_id).get('traceback')
 
     def get_result(self, task_id):
         """Get the result of a task."""
         meta = self.get_task_meta(task_id)
-        if meta["status"] in self.EXCEPTION_STATES:
-            return self.exception_to_python(meta["result"])
+        if meta['status'] in self.EXCEPTION_STATES:
+            return self.exception_to_python(meta['result'])
         else:
-            return meta["result"]
+            return meta['result']
 
     def get_children(self, task_id):
         """Get the list of subtasks sent by a task."""
         try:
-            return self.get_task_meta(task_id)["children"]
+            return self.get_task_meta(task_id)['children']
         except KeyError:
             pass
 
@@ -293,7 +293,7 @@ class BaseDictBackend(BaseBackend):
                 pass
 
         meta = self._get_task_meta_for(task_id)
-        if cache and meta.get("status") == states.SUCCESS:
+        if cache and meta.get('status') == states.SUCCESS:
             self._cache[task_id] = meta
         return meta
 
@@ -320,7 +320,7 @@ class BaseDictBackend(BaseBackend):
         """Get the result for a group."""
         meta = self.get_group_meta(group_id, cache=cache)
         if meta:
-            return meta["result"]
+            return meta['result']
 
     def save_group(self, group_id, result):
         """Store the result of an executed group."""
@@ -332,25 +332,25 @@ class BaseDictBackend(BaseBackend):
 
 
 class KeyValueStoreBackend(BaseDictBackend):
-    task_keyprefix = ensure_bytes("celery-task-meta-")
-    group_keyprefix = ensure_bytes("celery-taskset-meta-")
-    chord_keyprefix = ensure_bytes("chord-unlock-")
+    task_keyprefix = ensure_bytes('celery-task-meta-')
+    group_keyprefix = ensure_bytes('celery-taskset-meta-')
+    chord_keyprefix = ensure_bytes('chord-unlock-')
     implements_incr = False
 
     def get(self, key):
-        raise NotImplementedError("Must implement the get method.")
+        raise NotImplementedError('Must implement the get method.')
 
     def mget(self, keys):
-        raise NotImplementedError("Does not support get_many")
+        raise NotImplementedError('Does not support get_many')
 
     def set(self, key, value):
-        raise NotImplementedError("Must implement the set method.")
+        raise NotImplementedError('Must implement the set method.')
 
     def delete(self, key):
-        raise NotImplementedError("Must implement the delete method")
+        raise NotImplementedError('Must implement the delete method')
 
     def incr(self, key):
-        raise NotImplementedError("Does not implement incr")
+        raise NotImplementedError('Does not implement incr')
 
     def expire(self, key, value):
         pass
@@ -376,7 +376,7 @@ class KeyValueStoreBackend(BaseDictBackend):
         return bytes_to_str(key)
 
     def _mget_to_results(self, values, keys):
-        if hasattr(values, "items"):
+        if hasattr(values, 'items'):
             # client returns dict so mapping preserved.
             return dict((self._strip_prefix(k), self.decode(v))
                             for k, v in values.iteritems()
@@ -396,7 +396,7 @@ class KeyValueStoreBackend(BaseDictBackend):
             except KeyError:
                 pass
             else:
-                if cached["status"] in states.READY_STATES:
+                if cached['status'] in states.READY_STATES:
                     yield bytes_to_str(task_id), cached
                     cached_ids.add(task_id)
 
@@ -411,7 +411,7 @@ class KeyValueStoreBackend(BaseDictBackend):
             for key, value in r.iteritems():
                 yield bytes_to_str(key), value
             if timeout and iterations * interval >= timeout:
-                raise TimeoutError("Operation timed out (%s)" % (timeout, ))
+                raise TimeoutError('Operation timed out (%s)' % (timeout, ))
             time.sleep(interval)  # don't busy loop.
             iterations += 1
 
@@ -419,14 +419,14 @@ class KeyValueStoreBackend(BaseDictBackend):
         self.delete(self.get_key_for_task(task_id))
 
     def _store_result(self, task_id, result, status, traceback=None):
-        meta = {"status": status, "result": result, "traceback": traceback,
-                "children": self.current_task_children()}
+        meta = {'status': status, 'result': result, 'traceback': traceback,
+                'children': self.current_task_children()}
         self.set(self.get_key_for_task(task_id), self.encode(meta))
         return result
 
     def _save_group(self, group_id, result):
         self.set(self.get_key_for_group(group_id),
-                 self.encode({"result": result.serializable()}))
+                 self.encode({'result': result.serializable()}))
         return result
 
     def _delete_group(self, group_id):
@@ -436,7 +436,7 @@ class KeyValueStoreBackend(BaseDictBackend):
         """Get task metadata for a task by id."""
         meta = self.get(self.get_key_for_task(task_id))
         if not meta:
-            return {"status": states.PENDING, "result": None}
+            return {'status': states.PENDING, 'result': None}
         return self.decode(meta)
 
     def _restore_group(self, group_id):
@@ -447,9 +447,9 @@ class KeyValueStoreBackend(BaseDictBackend):
         # structure is kind of weird.
         if meta:
             meta = self.decode(meta)
-            result = meta["result"]
+            result = meta['result']
             if isinstance(result, (list, tuple)):
-                return {"result": from_serializable(result)}
+                return {'result': from_serializable(result)}
             return meta
 
     def on_chord_apply(self, group_id, body, result=None, **kwargs):
@@ -484,6 +484,6 @@ class DisabledBackend(BaseBackend):
         pass
 
     def _is_disabled(self, *args, **kwargs):
-        raise NotImplementedError("No result backend configured.  "
-                "Please see the documentation for more information.")
+        raise NotImplementedError('No result backend configured.  '
+                'Please see the documentation for more information.')
     wait_for = get_status = get_result = get_traceback = _is_disabled
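
Note: the stubs converted above double as the interface contract for concrete
backends. A minimal in-memory sketch of a `KeyValueStoreBackend` subclass
(the `DictBackend` name and plain-dict storage are illustrative, not part of
this changeset)::

    from celery.backends.base import KeyValueStoreBackend

    class DictBackend(KeyValueStoreBackend):
        """Toy backend: overrides exactly the methods that otherwise
        raise NotImplementedError above."""

        def __init__(self, *args, **kwargs):
            super(DictBackend, self).__init__(*args, **kwargs)
            self._data = {}

        def get(self, key):
            return self._data.get(key)

        def mget(self, keys):
            return [self._data.get(key) for key in keys]

        def set(self, key, value):
            self._data[key] = value

        def delete(self, key):
            self._data.pop(key, None)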

+ 14 - 14
celery/backends/cache.py

@@ -28,14 +28,14 @@ def import_best_memcache():
                 import memcache  # noqa
             except ImportError:
                 raise ImproperlyConfigured(
-                        "Memcached backend requires either the 'pylibmc' "
-                        "or 'memcache' library")
+                    'Memcached backend requires either the pylibmc '
+                    'or memcache library')
         _imp[0] = (is_pylibmc, memcache)
     return _imp[0]
 
 
 def get_best_memcache(*args, **kwargs):
-    behaviors = kwargs.pop("behaviors", None)
+    behaviors = kwargs.pop('behaviors', None)
     is_pylibmc, memcache = import_best_memcache()
     client = memcache.Client(*args, **kwargs)
     if is_pylibmc and behaviors is not None:
@@ -65,10 +65,10 @@ class DummyClient(object):
         return self.cache.incr(key, delta)
 
 
-backends = {"memcache": lambda: get_best_memcache,
-            "memcached": lambda: get_best_memcache,
-            "pylibmc": lambda: get_best_memcache,
-            "memory": lambda: DummyClient}
+backends = {'memcache': lambda: get_best_memcache,
+            'memcached': lambda: get_best_memcache,
+            'pylibmc': lambda: get_best_memcache,
+            'memory': lambda: DummyClient}
 
 
 class CacheBackend(KeyValueStoreBackend):
@@ -84,16 +84,16 @@ class CacheBackend(KeyValueStoreBackend):
 
         self.backend = backend or self.app.conf.CELERY_CACHE_BACKEND
         if self.backend:
-            self.backend, _, servers = self.backend.partition("://")
-            self.servers = servers.rstrip('/').split(";")
+            self.backend, _, servers = self.backend.partition('://')
+            self.servers = servers.rstrip('/').split(';')
         self.expires = self.prepare_expires(expires, type=int)
         try:
             self.Client = backends[self.backend]()
         except KeyError:
             raise ImproperlyConfigured(
-                    "Unknown cache backend: %s. Please use one of the "
-                    "following backends: %s" % (self.backend,
-                                                ", ".join(backends.keys())))
+                    'Unknown cache backend: %s. Please use one of the '
+                    'following backends: %s' % (self.backend,
+                                                ', '.join(backends.keys())))
 
     def get(self, key):
         return self.client.get(key)
@@ -118,8 +118,8 @@ class CacheBackend(KeyValueStoreBackend):
         return self.Client(self.servers, **self.options)
 
     def __reduce__(self, args=(), kwargs={}):
-        servers = ";".join(self.servers)
-        backend = "%s://%s/" % (self.backend, servers)
+        servers = ';'.join(self.servers)
+        backend = '%s://%s/' % (self.backend, servers)
         kwargs.update(
             dict(backend=backend,
                  expires=self.expires,
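
For reference, the `partition('://')` / `split(';')` pair in `__init__` above
is the only URL parsing the cache backend does; with a hypothetical
two-server backend string::

    >>> url = 'memcached://192.168.1.100:11211;192.168.1.101:11211/'
    >>> scheme, _, servers = url.partition('://')
    >>> scheme
    'memcached'
    >>> servers.rstrip('/').split(';')
    ['192.168.1.100:11211', '192.168.1.101:11211']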

+ 26 - 26
celery/backends/cassandra.py

@@ -33,7 +33,7 @@ class CassandraBackend(BaseDictBackend):
 
     .. attribute:: servers
 
-        List of Cassandra servers with format: "hostname:port".
+        List of Cassandra servers with format: ``hostname:port``.
 
     :raises celery.exceptions.ImproperlyConfigured: if
         module :mod:`pycassa` is not available.
@@ -56,31 +56,31 @@ class CassandraBackend(BaseDictBackend):
         """
         super(CassandraBackend, self).__init__(**kwargs)
 
-        self.expires = kwargs.get("expires") or maybe_timedelta(
+        self.expires = kwargs.get('expires') or maybe_timedelta(
                                     self.app.conf.CELERY_TASK_RESULT_EXPIRES)
 
         if not pycassa:
             raise ImproperlyConfigured(
-                "You need to install the pycassa library to use the "
-                "Cassandra backend. See https://github.com/pycassa/pycassa")
+                'You need to install the pycassa library to use the '
+                'Cassandra backend. See https://github.com/pycassa/pycassa')
 
         conf = self.app.conf
         self.servers = (servers or
-                        conf.get("CASSANDRA_SERVERS") or
+                        conf.get('CASSANDRA_SERVERS') or
                         self.servers)
         self.keyspace = (keyspace or
-                         conf.get("CASSANDRA_KEYSPACE") or
+                         conf.get('CASSANDRA_KEYSPACE') or
                          self.keyspace)
         self.column_family = (column_family or
-                              conf.get("CASSANDRA_COLUMN_FAMILY") or
+                              conf.get('CASSANDRA_COLUMN_FAMILY') or
                               self.column_family)
-        self.cassandra_options = dict(conf.get("CASSANDRA_OPTIONS") or {},
+        self.cassandra_options = dict(conf.get('CASSANDRA_OPTIONS') or {},
                                       **cassandra_options or {})
         self.detailed_mode = (detailed_mode or
-                              conf.get("CASSANDRA_DETAILED_MODE") or
+                              conf.get('CASSANDRA_DETAILED_MODE') or
                               self.detailed_mode)
-        read_cons = conf.get("CASSANDRA_READ_CONSISTENCY") or "LOCAL_QUORUM"
-        write_cons = conf.get("CASSANDRA_WRITE_CONSISTENCY") or "LOCAL_QUORUM"
+        read_cons = conf.get('CASSANDRA_READ_CONSISTENCY') or 'LOCAL_QUORUM'
+        write_cons = conf.get('CASSANDRA_WRITE_CONSISTENCY') or 'LOCAL_QUORUM'
         try:
             self.read_consistency = getattr(pycassa.ConsistencyLevel,
                                             read_cons)
@@ -94,7 +94,7 @@ class CassandraBackend(BaseDictBackend):
 
         if not self.servers or not self.keyspace or not self.column_family:
             raise ImproperlyConfigured(
-                    "Cassandra backend not configured.")
+                    'Cassandra backend not configured.')
 
         self._column_family = None
 
@@ -135,16 +135,16 @@ class CassandraBackend(BaseDictBackend):
         def _do_store():
             cf = self._get_column_family()
             date_done = self.app.now()
-            meta = {"status": status,
-                    "date_done": date_done.strftime('%Y-%m-%dT%H:%M:%SZ'),
-                    "traceback": self.encode(traceback),
-                    "children": self.encode(self.current_task_children())}
+            meta = {'status': status,
+                    'date_done': date_done.strftime('%Y-%m-%dT%H:%M:%SZ'),
+                    'traceback': self.encode(traceback),
+                    'children': self.encode(self.current_task_children())}
             if self.detailed_mode:
-                meta["result"] = result
+                meta['result'] = result
                 cf.insert(task_id, {date_done: self.encode(meta)},
                           ttl=timedelta_seconds(self.expires))
             else:
-                meta["result"] = self.encode(result)
+                meta['result'] = self.encode(result)
                 cf.insert(task_id, meta,
                           ttl=timedelta_seconds(self.expires))
 
@@ -159,19 +159,19 @@ class CassandraBackend(BaseDictBackend):
                 if self.detailed_mode:
                     row = cf.get(task_id, column_reversed=True, column_count=1)
                     meta = self.decode(row.values()[0])
-                    meta["task_id"] = task_id
+                    meta['task_id'] = task_id
                 else:
                     obj = cf.get(task_id)
                     meta = {
-                        "task_id": task_id,
-                        "status": obj["status"],
-                        "result": self.decode(obj["result"]),
-                        "date_done": obj["date_done"],
-                        "traceback": self.decode(obj["traceback"]),
-                        "children": self.decode(obj["children"]),
+                        'task_id': task_id,
+                        'status': obj['status'],
+                        'result': self.decode(obj['result']),
+                        'date_done': obj['date_done'],
+                        'traceback': self.decode(obj['traceback']),
+                        'children': self.decode(obj['children']),
                     }
             except (KeyError, pycassa.NotFoundException):
-                meta = {"status": states.PENDING, "result": None}
+                meta = {'status': states.PENDING, 'result': None}
             return meta
 
         return self._retry_on_error(_do_get)
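
The repeated `argument or conf.get(KEY) or class_default` chains in
`__init__` above resolve each setting in that order. Reduced to plain Python
(names and values hypothetical)::

    class_default = ['localhost:9160']
    conf = {'CASSANDRA_SERVERS': ['cass1:9160', 'cass2:9160']}

    servers = (None or                        # no explicit argument given
               conf.get('CASSANDRA_SERVERS') or
               class_default)
    assert servers == ['cass1:9160', 'cass2:9160']
    # Caveat: `or` tests truthiness, so an explicit empty list also
    # falls through to the next candidate.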

+ 6 - 6
celery/backends/database/__init__.py

@@ -25,8 +25,8 @@ def _sqlalchemy_installed():
         import sqlalchemy
     except ImportError:
         raise ImproperlyConfigured(
-            "The database result backend requires SQLAlchemy to be installed."
-            "See http://pypi.python.org/pypi/SQLAlchemy")
+            'The database result backend requires SQLAlchemy to be '
+            'installed. See http://pypi.python.org/pypi/SQLAlchemy')
     return sqlalchemy
 _sqlalchemy_installed()
 
@@ -37,7 +37,7 @@ def retry(fun):
 
     @wraps(fun)
     def _inner(*args, **kwargs):
-        max_retries = kwargs.pop("max_retries", 3)
+        max_retries = kwargs.pop('max_retries', 3)
 
         for retries in xrange(max_retries + 1):
             try:
@@ -63,12 +63,12 @@ class DatabaseBackend(BaseDictBackend):
         self.dburi = dburi or conf.CELERY_RESULT_DBURI
         self.engine_options = dict(engine_options or {},
                         **conf.CELERY_RESULT_ENGINE_OPTIONS or {})
-        self.short_lived_sessions = kwargs.get("short_lived_sessions",
+        self.short_lived_sessions = kwargs.get('short_lived_sessions',
                                     conf.CELERY_RESULT_DB_SHORT_LIVED_SESSIONS)
         if not self.dburi:
             raise ImproperlyConfigured(
-                    "Missing connection string! Do you have "
-                    "CELERY_RESULT_DBURI set to a real value?")
+                    'Missing connection string! Do you have '
+                    'CELERY_RESULT_DBURI set to a real value?')
 
     def ResultSession(self):
         return ResultSession(
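
The `retry` decorator touched above pops `max_retries` from the call and
re-runs the session operation; a self-contained sketch of the same idea (the
real decorator catches specific database errors, a bare `Exception` stands in
here)::

    from functools import wraps

    def retry(fun):

        @wraps(fun)
        def _inner(*args, **kwargs):
            max_retries = kwargs.pop('max_retries', 3)
            for retries in xrange(max_retries + 1):
                try:
                    return fun(*args, **kwargs)
                except Exception:            # stand-in for DB errors
                    if retries + 1 > max_retries:
                        raise
        return _inner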

+ 15 - 13
celery/backends/database/a805d4bd.py

@@ -1,23 +1,25 @@
 # -*- coding: utf-8 -*-
 """
-a805d4bd
-This module fixes a bug with pickling and relative imports in Python < 2.6.
+    celery.backends.database.a805d4bd
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The problem is with pickling an e.g. `exceptions.KeyError` instance.
-As SQLAlchemy has its own `exceptions` module, pickle will try to
-lookup :exc:`KeyError` in the wrong module, resulting in this exception::
+    This module fixes a bug with pickling and relative imports in Python < 2.6.
 
-    cPickle.PicklingError: Can't pickle <type 'exceptions.KeyError'>:
-        attribute lookup exceptions.KeyError failed
+    The problem is with pickling an e.g. `exceptions.KeyError` instance.
+    As SQLAlchemy has its own `exceptions` module, pickle will try to
+    lookup :exc:`KeyError` in the wrong module, resulting in this exception::
 
-doing `import exceptions` just before the dump in `sqlalchemy.types`
-reveals the source of the bug::
+        cPickle.PicklingError: Can't pickle <type 'exceptions.KeyError'>:
+            attribute lookup exceptions.KeyError failed
 
-    EXCEPTIONS: <module 'sqlalchemy.exc' from '/var/lib/hudson/jobs/celery/
-        workspace/buildenv/lib/python2.5/site-packages/sqlalchemy/exc.pyc'>
+    doing `import exceptions` just before the dump in `sqlalchemy.types`
+    reveals the source of the bug::
 
-Hence the random module name "a805d5bd" is taken to decrease the chances of
-a collision.
+        EXCEPTIONS: <module 'sqlalchemy.exc' from '/var/lib/hudson/jobs/celery/
+            workspace/buildenv/lib/python2.5/site-packages/sqlalchemy/exc.pyc'>
+
+    Hence the random module name 'a805d4bd' is taken to decrease the chances of
+    a collision.
 
 """
 from __future__ import absolute_import

+ 2 - 2
celery/backends/database/dfd042c7.py

@@ -38,8 +38,8 @@ class PickleType(_PickleType):  # pragma: no cover
             return self.comparator(x, y)
         elif self.mutable and not hasattr(x, '__eq__') and x is not None:
             util.warn_deprecated(
-                    "Objects stored with PickleType when mutable=True "
-                    "must implement __eq__() for reliable comparison.")
+                    'Objects stored with PickleType when mutable=True '
+                    'must implement __eq__() for reliable comparison.')
             a = self.pickler.dumps(x, self.protocol)
             b = self.pickler.dumps(y, self.protocol)
             return a == b

+ 16 - 16
celery/backends/database/models.py

@@ -25,10 +25,10 @@ else:
 
 class Task(ResultModelBase):
     """Task result/status."""
-    __tablename__ = "celery_taskmeta"
-    __table_args__ = {"sqlite_autoincrement": True}
+    __tablename__ = 'celery_taskmeta'
+    __table_args__ = {'sqlite_autoincrement': True}
 
-    id = sa.Column(sa.Integer, sa.Sequence("task_id_sequence"),
+    id = sa.Column(sa.Integer, sa.Sequence('task_id_sequence'),
                    primary_key=True,
                    autoincrement=True)
     task_id = sa.Column(sa.String(255), unique=True)
@@ -42,22 +42,22 @@ class Task(ResultModelBase):
         self.task_id = task_id
 
     def to_dict(self):
-        return {"task_id": self.task_id,
-                "status": self.status,
-                "result": self.result,
-                "traceback": self.traceback,
-                "date_done": self.date_done}
+        return {'task_id': self.task_id,
+                'status': self.status,
+                'result': self.result,
+                'traceback': self.traceback,
+                'date_done': self.date_done}
 
     def __repr__(self):
-        return "<Task %s state: %s>" % (self.task_id, self.status)
+        return '<Task %s state: %s>' % (self.task_id, self.status)
 
 
 class TaskSet(ResultModelBase):
     """TaskSet result"""
-    __tablename__ = "celery_tasksetmeta"
-    __table_args__ = {"sqlite_autoincrement": True}
+    __tablename__ = 'celery_tasksetmeta'
+    __table_args__ = {'sqlite_autoincrement': True}
 
-    id = sa.Column(sa.Integer, sa.Sequence("taskset_id_sequence"),
+    id = sa.Column(sa.Integer, sa.Sequence('taskset_id_sequence'),
                 autoincrement=True, primary_key=True)
     taskset_id = sa.Column(sa.String(255), unique=True)
     result = sa.Column(sa.PickleType, nullable=True)
@@ -69,9 +69,9 @@ class TaskSet(ResultModelBase):
         self.result = result
 
     def to_dict(self):
-        return {"taskset_id": self.taskset_id,
-                "result": self.result,
-                "date_done": self.date_done}
+        return {'taskset_id': self.taskset_id,
+                'result': self.result,
+                'date_done': self.date_done}
 
     def __repr__(self):
-        return u"<TaskSet: %s>" % (self.taskset_id, )
+        return '<TaskSet: %s>' % (self.taskset_id, )

+ 2 - 2
celery/backends/database/session.py

@@ -35,9 +35,9 @@ def create_session(dburi, short_lived_sessions=False, **kwargs):
 
 
 def setup_results(engine):
-    if not _SETUP["results"]:
+    if not _SETUP['results']:
         ResultModelBase.metadata.create_all(engine)
-        _SETUP["results"] = True
+        _SETUP['results'] = True
 
 
 def ResultSession(dburi, **kwargs):

+ 41 - 41
celery/backends/mongodb.py

@@ -39,12 +39,12 @@ class Bunch(object):
 
 
 class MongoBackend(BaseDictBackend):
-    mongodb_host = "localhost"
+    mongodb_host = 'localhost'
     mongodb_port = 27017
     mongodb_user = None
     mongodb_password = None
-    mongodb_database = "celery"
-    mongodb_taskmeta_collection = "celery_taskmeta"
+    mongodb_database = 'celery'
+    mongodb_taskmeta_collection = 'celery_taskmeta'
 
     def __init__(self, *args, **kwargs):
         """Initialize MongoDB backend instance.
@@ -54,29 +54,29 @@ class MongoBackend(BaseDictBackend):
 
         """
         super(MongoBackend, self).__init__(*args, **kwargs)
-        self.expires = kwargs.get("expires") or maybe_timedelta(
+        self.expires = kwargs.get('expires') or maybe_timedelta(
                                     self.app.conf.CELERY_TASK_RESULT_EXPIRES)
 
         if not pymongo:
             raise ImproperlyConfigured(
-                "You need to install the pymongo library to use the "
-                "MongoDB backend.")
+                'You need to install the pymongo library to use the '
+                'MongoDB backend.')
 
-        config = self.app.conf.get("CELERY_MONGODB_BACKEND_SETTINGS", None)
+        config = self.app.conf.get('CELERY_MONGODB_BACKEND_SETTINGS', None)
         if config is not None:
             if not isinstance(config, dict):
                 raise ImproperlyConfigured(
-                    "MongoDB backend settings should be grouped in a dict")
+                    'MongoDB backend settings should be grouped in a dict')
 
-            self.mongodb_host = config.get("host", self.mongodb_host)
-            self.mongodb_port = int(config.get("port", self.mongodb_port))
-            self.mongodb_user = config.get("user", self.mongodb_user)
+            self.mongodb_host = config.get('host', self.mongodb_host)
+            self.mongodb_port = int(config.get('port', self.mongodb_port))
+            self.mongodb_user = config.get('user', self.mongodb_user)
             self.mongodb_password = config.get(
-                    "password", self.mongodb_password)
+                    'password', self.mongodb_password)
             self.mongodb_database = config.get(
-                    "database", self.mongodb_database)
+                    'database', self.mongodb_database)
             self.mongodb_taskmeta_collection = config.get(
-                "taskmeta_collection", self.mongodb_taskmeta_collection)
+                'taskmeta_collection', self.mongodb_taskmeta_collection)
 
         self._connection = None
 
@@ -93,7 +93,7 @@ class MongoBackend(BaseDictBackend):
             # See pymongo.Connection() for more info.
             args = [self.mongodb_host]
             if isinstance(self.mongodb_host, basestring) \
-                    and not self.mongodb_host.startswith("mongodb://"):
+                    and not self.mongodb_host.startswith('mongodb://'):
                 args.append(self.mongodb_port)
 
             self._connection = Connection(*args)
@@ -108,12 +108,12 @@ class MongoBackend(BaseDictBackend):
 
     def _store_result(self, task_id, result, status, traceback=None):
         """Store return value and status of an executed task."""
-        meta = {"_id": task_id,
-                "status": status,
-                "result": Binary(self.encode(result)),
-                "date_done": datetime.utcnow(),
-                "traceback": Binary(self.encode(traceback)),
-                "children": Binary(self.encode(self.current_task_children()))}
+        meta = {'_id': task_id,
+                'status': status,
+                'result': Binary(self.encode(result)),
+                'date_done': datetime.utcnow(),
+                'traceback': Binary(self.encode(traceback)),
+                'children': Binary(self.encode(self.current_task_children()))}
         self.collection.save(meta, safe=True)
 
         return result
@@ -121,47 +121,47 @@ class MongoBackend(BaseDictBackend):
     def _get_task_meta_for(self, task_id):
         """Get task metadata for a task by id."""
 
-        obj = self.collection.find_one({"_id": task_id})
+        obj = self.collection.find_one({'_id': task_id})
         if not obj:
-            return {"status": states.PENDING, "result": None}
+            return {'status': states.PENDING, 'result': None}
 
         meta = {
-            "task_id": obj["_id"],
-            "status": obj["status"],
-            "result": self.decode(obj["result"]),
-            "date_done": obj["date_done"],
-            "traceback": self.decode(obj["traceback"]),
-            "children": self.decode(obj["children"]),
+            'task_id': obj['_id'],
+            'status': obj['status'],
+            'result': self.decode(obj['result']),
+            'date_done': obj['date_done'],
+            'traceback': self.decode(obj['traceback']),
+            'children': self.decode(obj['children']),
         }
 
         return meta
 
     def _save_group(self, group_id, result):
         """Save the group result."""
-        meta = {"_id": group_id,
-                "result": Binary(self.encode(result)),
-                "date_done": datetime.utcnow()}
+        meta = {'_id': group_id,
+                'result': Binary(self.encode(result)),
+                'date_done': datetime.utcnow()}
         self.collection.save(meta, safe=True)
 
         return result
 
     def _restore_group(self, group_id):
         """Get the result for a group by id."""
-        obj = self.collection.find_one({"_id": group_id})
+        obj = self.collection.find_one({'_id': group_id})
         if not obj:
             return
 
         meta = {
-            "task_id": obj["_id"],
-            "result": self.decode(obj["result"]),
-            "date_done": obj["date_done"],
+            'task_id': obj['_id'],
+            'result': self.decode(obj['result']),
+            'date_done': obj['date_done'],
         }
 
         return meta
 
     def _delete_group(self, group_id):
         """Delete a group by id."""
-        self.collection.remove({"_id": group_id})
+        self.collection.remove({'_id': group_id})
 
     def _forget(self, task_id):
         """
@@ -173,13 +173,13 @@ class MongoBackend(BaseDictBackend):
         # By using safe=True, this will wait until it receives a response from
         # the server.  Likewise, it will raise an OperationsError if the
         # response was unable to be completed.
-        self.collection.remove({"_id": task_id}, safe=True)
+        self.collection.remove({'_id': task_id}, safe=True)
 
     def cleanup(self):
         """Delete expired metadata."""
         self.collection.remove({
-                "date_done": {
-                    "$lt": self.app.now() - self.expires,
+                'date_done': {
+                    '$lt': self.app.now() - self.expires,
                  }
         })
 
@@ -195,7 +195,7 @@ class MongoBackend(BaseDictBackend):
             if not db.authenticate(self.mongodb_user,
                                    self.mongodb_password):
                 raise ImproperlyConfigured(
-                    "Invalid MongoDB username or password.")
+                    'Invalid MongoDB username or password.')
         return db
 
     @cached_property
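
The settings block above reads six keys from
`CELERY_MONGODB_BACKEND_SETTINGS`, each falling back to the class-level
default; a hypothetical configuration covering all of them::

    CELERY_MONGODB_BACKEND_SETTINGS = {
        'host': 'mongo.example.com',   # or a full 'mongodb://...' URI
        'port': 27017,
        'user': 'celery',              # hypothetical credentials
        'password': 'secret',
        'database': 'celery',
        'taskmeta_collection': 'celery_taskmeta',
    }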

+ 10 - 10
celery/backends/redis.py

@@ -30,7 +30,7 @@ class RedisBackend(KeyValueStoreBackend):
     redis = redis
 
     #: default Redis server hostname (`localhost`).
-    host = "localhost"
+    host = 'localhost'
 
     #: default Redis server port (6379)
     port = 6379
@@ -53,12 +53,12 @@ class RedisBackend(KeyValueStoreBackend):
         conf = self.app.conf
         if self.redis is None:
             raise ImproperlyConfigured(
-                    "You need to install the redis library in order to use "
-                  + "the Redis result store backend.")
+                    'You need to install the redis library in order to use '
+                  + 'the Redis result store backend.')
 
         # For compatibility with the old REDIS_* configuration keys.
         def _get(key):
-            for prefix in "CELERY_REDIS_%s", "REDIS_%s":
+            for prefix in 'CELERY_REDIS_%s', 'REDIS_%s':
                 try:
                     return conf[prefix % key]
                 except KeyError:
@@ -69,14 +69,14 @@ class RedisBackend(KeyValueStoreBackend):
         uhost = uport = upass = udb = None
         if url:
             _, uhost, uport, _, upass, udb, _ = _parse_url(url)
-            udb = udb.strip("/")
-        self.host = uhost or host or _get("HOST") or self.host
-        self.port = int(uport or port or _get("PORT") or self.port)
-        self.db = udb or db or _get("DB") or self.db
-        self.password = upass or password or _get("PASSWORD") or self.password
+            udb = udb.strip('/')
+        self.host = uhost or host or _get('HOST') or self.host
+        self.port = int(uport or port or _get('PORT') or self.port)
+        self.db = udb or db or _get('DB') or self.db
+        self.password = upass or password or _get('PASSWORD') or self.password
         self.expires = self.prepare_expires(expires, type=int)
         self.max_connections = (max_connections
-                                or _get("MAX_CONNECTIONS")
+                                or _get('MAX_CONNECTIONS')
                                 or self.max_connections)
 
     def get(self, key):
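
The `_get` helper above prefers the new `CELERY_REDIS_*` names but still
honours the old `REDIS_*` ones; standalone, with an illustrative `conf`
dict::

    def _get(conf, key):
        for prefix in 'CELERY_REDIS_%s', 'REDIS_%s':
            try:
                return conf[prefix % key]
            except KeyError:
                pass

    conf = {'REDIS_HOST': 'redis.example.com'}    # old-style key only
    assert _get(conf, 'HOST') == 'redis.example.com'
    assert _get(conf, 'PORT') is None             # falls back to defaults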

+ 38 - 38
celery/beat.py

@@ -106,9 +106,9 @@ class ScheduleEntry(object):
         options).
 
         """
-        self.__dict__.update({"task": other.task, "schedule": other.schedule,
-                              "args": other.args, "kwargs": other.kwargs,
-                              "options": other.options})
+        self.__dict__.update({'task': other.task, 'schedule': other.schedule,
+                              'args': other.args, 'kwargs': other.kwargs,
+                              'options': other.options})
 
     def is_due(self):
         """See :meth:`~celery.schedule.schedule.is_due`."""
@@ -118,7 +118,7 @@ class ScheduleEntry(object):
         return vars(self).iteritems()
 
     def __repr__(self):
-        return ("<Entry: %s %s {%s}" % (self.name,
+        return ('<Entry: %s %s {%s}' % (self.name,
                     reprcall(self.task, self.args or (), self.kwargs or {}),
                     self.schedule))
 
@@ -160,25 +160,25 @@ class Scheduler(object):
     def install_default_entries(self, data):
         entries = {}
         if self.app.conf.CELERY_TASK_RESULT_EXPIRES:
-            if "celery.backend_cleanup" not in data:
-                entries["celery.backend_cleanup"] = {
-                        "task": "celery.backend_cleanup",
-                        "schedule": crontab("0", "4", "*"),
-                        "options": {"expires": 12 * 3600}}
+            if 'celery.backend_cleanup' not in data:
+                entries['celery.backend_cleanup'] = {
+                        'task': 'celery.backend_cleanup',
+                        'schedule': crontab('0', '4', '*'),
+                        'options': {'expires': 12 * 3600}}
         self.update_from_dict(entries)
 
     def maybe_due(self, entry, publisher=None):
         is_due, next_time_to_run = entry.is_due()
 
         if is_due:
-            info("Scheduler: Sending due task %s", entry.task)
+            info('Scheduler: Sending due task %s', entry.task)
             try:
                 result = self.apply_async(entry, publisher=publisher)
             except Exception, exc:
-                error("Message Error: %s\n%s",
+                error('Message Error: %s\n%s',
                       exc, traceback.format_stack(), exc_info=True)
             else:
-                debug("%s sent. id->%s", entry.task, result.id)
+                debug('%s sent. id->%s', entry.task, result.id)
         return next_time_to_run
 
     def tick(self):
@@ -239,7 +239,7 @@ class Scheduler(object):
 
     def _do_sync(self):
         try:
-            debug("Celerybeat: Synchronizing schedule...")
+            debug('Celerybeat: Synchronizing schedule...')
             self.sync()
         finally:
             self._last_sync = time.time()
@@ -284,8 +284,8 @@ class Scheduler(object):
         # callback called for each retry while the connection
         # can't be established.
         def _error_handler(exc, interval):
-            error("Celerybeat: Connection error: %s. "
-                  "Trying again in %s seconds...", exc, interval)
+            error('Celerybeat: Connection error: %s. '
+                  'Trying again in %s seconds...', exc, interval)
 
         return self.connection.ensure_connection(_error_handler,
                     self.app.conf.BROKER_CONNECTION_MAX_RETRIES)
@@ -307,17 +307,17 @@ class Scheduler(object):
 
     @property
     def info(self):
-        return ""
+        return ''
 
 
 class PersistentScheduler(Scheduler):
     persistence = shelve
-    known_suffixes = ("", ".db", ".dat", ".bak", ".dir")
+    known_suffixes = ('', '.db', '.dat', '.bak', '.dir')
 
     _store = None
 
     def __init__(self, *args, **kwargs):
-        self.schedule_filename = kwargs.get("schedule_filename")
+        self.schedule_filename = kwargs.get('schedule_filename')
         Scheduler.__init__(self, *args, **kwargs)
 
     def _remove_db(self):
@@ -332,29 +332,29 @@ class PersistentScheduler(Scheduler):
         try:
             self._store = self.persistence.open(self.schedule_filename,
                                                 writeback=True)
-            entries = self._store.setdefault("entries", {})
+            entries = self._store.setdefault('entries', {})
         except Exception, exc:
-            error("Removing corrupted schedule file %r: %r",
+            error('Removing corrupted schedule file %r: %r',
                   self.schedule_filename, exc, exc_info=True)
             self._remove_db()
             self._store = self.persistence.open(self.schedule_filename,
                                                 writeback=True)
         else:
-            if "__version__" not in self._store:
+            if '__version__' not in self._store:
                 self._store.clear()   # remove schedule at 2.2.2 upgrade.
-        entries = self._store.setdefault("entries", {})
+        entries = self._store.setdefault('entries', {})
         self.merge_inplace(self.app.conf.CELERYBEAT_SCHEDULE)
         self.install_default_entries(self.schedule)
-        self._store["__version__"] = __version__
+        self._store['__version__'] = __version__
         self.sync()
-        debug("Current schedule:\n" + "\n".join(repr(entry)
+        debug('Current schedule:\n' + '\n'.join(repr(entry)
                                     for entry in entries.itervalues()))
 
     def get_schedule(self):
-        return self._store["entries"]
+        return self._store['entries']
 
     def set_schedule(self, schedule):
-        self._store["entries"] = schedule
+        self._store['entries'] = schedule
     schedule = property(get_schedule, set_schedule)
 
     def sync(self):
@@ -367,7 +367,7 @@ class PersistentScheduler(Scheduler):
 
     @property
     def info(self):
-        return "    . db -> %s" % (self.schedule_filename, )
+        return '    . db -> %s' % (self.schedule_filename, )
 
 
 class Service(object):
@@ -386,20 +386,20 @@ class Service(object):
         self._is_stopped = Event()
 
     def start(self, embedded_process=False):
-        info("Celerybeat: Starting...")
-        debug("Celerybeat: Ticking with max interval->%s",
+        info('Celerybeat: Starting...')
+        debug('Celerybeat: Ticking with max interval->%s',
               humanize_seconds(self.scheduler.max_interval))
 
         signals.beat_init.send(sender=self)
         if embedded_process:
             signals.beat_embedded_init.send(sender=self)
-            platforms.set_process_title("celerybeat")
+            platforms.set_process_title('celerybeat')
 
         try:
             while not self._is_shutdown.is_set():
                 interval = self.scheduler.tick()
-                debug("Celerybeat: Waking up %s.",
-                      humanize_seconds(interval, prefix="in "))
+                debug('Celerybeat: Waking up %s.',
+                      humanize_seconds(interval, prefix='in '))
                 time.sleep(interval)
         except (KeyboardInterrupt, SystemExit):
             self._is_shutdown.set()
@@ -411,7 +411,7 @@ class Service(object):
         self._is_stopped.set()
 
     def stop(self, wait=False):
-        info("Celerybeat: Shutting down...")
+        info('Celerybeat: Shutting down...')
         self._is_shutdown.set()
         wait and self._is_stopped.wait()  # block until shutdown done.
 
@@ -436,7 +436,7 @@ class _Threaded(Thread):
         super(_Threaded, self).__init__()
         self.service = Service(*args, **kwargs)
         self.daemon = True
-        self.name = "Beat"
+        self.name = 'Beat'
 
     def run(self):
         self.service.start()
@@ -455,10 +455,10 @@ else:
         def __init__(self, *args, **kwargs):
             super(_Process, self).__init__()
             self.service = Service(*args, **kwargs)
-            self.name = "Beat"
+            self.name = 'Beat'
 
         def run(self):
-            platforms.signals.reset("SIGTERM")
+            platforms.signals.reset('SIGTERM')
             self.service.start(embedded_process=True)
 
         def stop(self):
@@ -473,9 +473,9 @@ def EmbeddedService(*args, **kwargs):
         Default is :const:`False`.
 
     """
-    if kwargs.pop("thread", False) or _Process is None:
+    if kwargs.pop('thread', False) or _Process is None:
         # Need short max interval to be able to stop thread
         # in reasonable time.
-        kwargs.setdefault("max_interval", 1)
+        kwargs.setdefault('max_interval', 1)
         return _Threaded(*args, **kwargs)
     return _Process(*args, **kwargs)
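
For comparison with the `celery.backend_cleanup` entry installed above, a
user-defined `CELERYBEAT_SCHEDULE` entry in the same shape (task name and
timing hypothetical)::

    from celery.schedules import crontab

    CELERYBEAT_SCHEDULE = {
        'nightly-report': {
            'task': 'reports.generate',          # hypothetical task
            'schedule': crontab('0', '4', '*'),  # minute, hour, day_of_week
            'options': {'expires': 12 * 3600},   # drop if 12 hours stale
        },
    }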

+ 32 - 32
celery/bin/base.py

@@ -82,7 +82,7 @@ from celery.utils.imports import symbol_by_name, import_from_cwd
 
 # always enable DeprecationWarnings, so our users can see them.
 for warning in (CDeprecationWarning, CPendingDeprecationWarning):
-    warnings.simplefilter("once", warning, 0)
+    warnings.simplefilter('once', warning, 0)
 
 ARGV_DISABLED = """
 Unrecognized command line arguments: %s
@@ -97,7 +97,7 @@ class HelpFormatter(IndentedHelpFormatter):
 
     def format_epilog(self, epilog):
         if epilog:
-            return "\n%s\n\n" % epilog
+            return '\n%s\n\n' % epilog
         return ''
 
     def format_description(self, description):
@@ -132,17 +132,17 @@ class Command(object):
 
     #: List of options to parse before parsing other options.
     preload_options = (
-        Option("-A", "--app", default=None),
-        Option("-b", "--broker", default=None),
-        Option("--loader", default=None),
-        Option("--config", default="celeryconfig", dest="config_module"),
+        Option('-A', '--app', default=None),
+        Option('-b', '--broker', default=None),
+        Option('--loader', default=None),
+        Option('--config', default='celeryconfig', dest='config_module'),
     )
 
     #: Enable if the application should support config from the cmdline.
     enable_config_from_cmdline = False
 
     #: Default configuration namespace.
-    namespace = "celery"
+    namespace = 'celery'
 
     #: Text to print at end of --help
     epilog = None
@@ -156,7 +156,7 @@ class Command(object):
 
     def run(self, *args, **options):
         """This is the body of the command called by :meth:`handle_argv`."""
-        raise NotImplementedError("subclass responsibility")
+        raise NotImplementedError('subclass responsibility')
 
     def execute_from_commandline(self, argv=None):
         """Execute application from command line.
@@ -173,7 +173,7 @@ class Command(object):
 
     def usage(self, command):
         """Returns the command-line usage string for this app."""
-        return "%%prog [options] %s" % (self.args, )
+        return '%%prog [options] %s' % (self.args, )
 
     def get_options(self):
         """Get supported command line options."""
@@ -212,15 +212,15 @@ class Command(object):
             self.die(ARGV_DISABLED % (', '.join(args, )), EX_USAGE)
 
     def die(self, msg, status=EX_FAILURE):
-        sys.stderr.write(msg + "\n")
+        sys.stderr.write(msg + '\n')
         sys.exit(status)
 
     def parse_options(self, prog_name, arguments):
         """Parse the available options."""
         # Don't want to load configuration to just print the version,
         # so we handle --version manually here.
-        if "--version" in arguments:
-            sys.stdout.write("%s\n" % self.version)
+        if '--version' in arguments:
+            sys.stdout.write('%s\n' % self.version)
             sys.exit(0)
         parser = self.create_parser(prog_name)
         return parser.parse_args(arguments)
@@ -241,7 +241,7 @@ class Command(object):
             for long_opt, help in doc.iteritems():
                 option = parser.get_option(long_opt)
                 if option is not None:
-                    option.help = ' '.join(help) % {"default": option.default}
+                    option.help = ' '.join(help) % {'default': option.default}
         return parser
 
     def prepare_preload_options(self, options):
@@ -256,18 +256,18 @@ class Command(object):
     def setup_app_from_commandline(self, argv):
         preload_options = self.parse_preload_options(argv)
         self.prepare_preload_options(preload_options)
-        app = (preload_options.get("app") or
-               os.environ.get("CELERY_APP") or
+        app = (preload_options.get('app') or
+               os.environ.get('CELERY_APP') or
                self.app)
-        loader = (preload_options.get("loader") or
-                  os.environ.get("CELERY_LOADER") or
-                  "default")
-        broker = preload_options.get("broker", None)
+        loader = (preload_options.get('loader') or
+                  os.environ.get('CELERY_LOADER') or
+                  'default')
+        broker = preload_options.get('broker', None)
         if broker:
-            os.environ["CELERY_BROKER_URL"] = broker
-        config_module = preload_options.get("config_module")
+            os.environ['CELERY_BROKER_URL'] = broker
+        config_module = preload_options.get('config_module')
         if config_module:
-            os.environ["CELERY_CONFIG_MODULE"] = config_module
+            os.environ['CELERY_CONFIG_MODULE'] = config_module
         if app:
             self.app = self.find_app(app)
         else:
@@ -279,8 +279,8 @@ class Command(object):
     def find_app(self, app):
         sym = self.symbol_by_name(app)
         if isinstance(sym, ModuleType):
-            if getattr(sym, "__path__", None):
-                return self.find_app("%s.celery:" % (app.replace(":", ""), ))
+            if getattr(sym, '__path__', None):
+                return self.find_app('%s.celery:' % (app.replace(':', ''), ))
             return sym.celery
         return sym
 
@@ -323,11 +323,11 @@ class Command(object):
     def parse_doc(self, doc):
         options, in_option = defaultdict(list), None
         for line in doc.splitlines():
-            if line.startswith(".. cmdoption::"):
+            if line.startswith('.. cmdoption::'):
                 m = find_long_opt.match(line)
                 if m:
                     in_option = m.groups()[0].strip()
-                assert in_option, "missing long opt"
+                assert in_option, 'missing long opt'
             elif in_option and line.startswith(' ' * 4):
                 options[in_option].append(find_rst_ref.sub(r'\1',
                     line.strip()).replace('`', ''))
@@ -340,10 +340,10 @@ class Command(object):
 
 def daemon_options(default_pidfile=None, default_logfile=None):
     return (
-        Option("-f", "--logfile", default=default_logfile),
-        Option("--pidfile", default=default_pidfile),
-        Option("--uid", default=None),
-        Option("--gid", default=None),
-        Option("--umask", default=0, type="int"),
-        Option("--workdir", default=None, dest="working_directory"),
+        Option('-f', '--logfile', default=default_logfile),
+        Option('--pidfile', default=default_pidfile),
+        Option('--uid', default=None),
+        Option('--gid', default=None),
+        Option('--umask', default=0, type='int'),
+        Option('--workdir', default=None, dest='working_directory'),
     )
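
These shared daemon options are meant to be appended to a command's own
option list; a sketch of a hypothetical subcommand wiring them in through the
`get_options` hook::

    from celery.bin.base import Command, daemon_options

    class snapshot(Command):                  # hypothetical command

        def get_options(self):
            return daemon_options(default_pidfile='snapshot.pid') + (
                # command-specific Option instances would follow here
            )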

+ 72 - 72
celery/bin/camqadm.py

@@ -37,7 +37,7 @@ Example:
 
 
 def say(m, fh=sys.stderr):
-    fh.write("%s\n" % (m, ))
+    fh.write('%s\n' % (m, ))
 
 
 class Spec(object):
@@ -62,14 +62,14 @@ class Spec(object):
     """
     def __init__(self, *args, **kwargs):
         self.args = args
-        self.returns = kwargs.get("returns")
+        self.returns = kwargs.get('returns')
 
     def coerce(self, index, value):
         """Coerce value for argument at index.
 
-        E.g. if :attr:`args` is `[("is_active", bool)]`:
+        E.g. if :attr:`args` is `[('is_active', bool)]`:
 
-            >>> coerce(0, "False")
+            >>> coerce(0, 'False')
             False
 
         """
@@ -84,9 +84,9 @@ class Spec(object):
 
         e.g:
 
-            >>> spec = Spec([("queue", str), ("if_unused", bool)])
-            >>> spec.str_args_to_python("pobox", "true")
-            ("pobox", True)
+            >>> spec = Spec([('queue', str), ('if_unused', bool)])
+            >>> spec.str_args_to_python('pobox', 'true')
+            ('pobox', True)
 
         """
         return tuple(self.coerce(index, value)
@@ -96,7 +96,7 @@ class Spec(object):
         """Format the return value of this command in a human-friendly way."""
         if not self.returns:
             if response is None:
-                return "ok."
+                return 'ok.'
             return response
         if callable(self.returns):
             return self.returns(response)
@@ -104,24 +104,24 @@ class Spec(object):
 
     def format_arg(self, name, type, default_value=None):
         if default_value is not None:
-            return "%s:%s" % (name, default_value)
+            return '%s:%s' % (name, default_value)
         return name
 
     def format_signature(self):
-        return " ".join(self.format_arg(*padlist(list(arg), 3))
+        return ' '.join(self.format_arg(*padlist(list(arg), 3))
                             for arg in self.args)
 
 
 def dump_message(message):
     if message is None:
-        return "No messages in queue. basic.publish something."
-    return {"body": message.body,
-            "properties": message.properties,
-            "delivery_info": message.delivery_info}
+        return 'No messages in queue. basic.publish something.'
+    return {'body': message.body,
+            'properties': message.properties,
+            'delivery_info': message.delivery_info}
 
 
 def format_declare_queue(ret):
-    return "ok. queue:%s messages:%s consumers:%s." % ret
+    return 'ok. queue:%s messages:%s consumers:%s.' % ret
 
 
 class AMQShell(cmd.Cmd):
@@ -145,55 +145,55 @@ class AMQShell(cmd.Cmd):
     """
     conn = None
     chan = None
-    prompt_fmt = "%d> "
-    identchars = cmd.IDENTCHARS = "."
+    prompt_fmt = '%d> '
+    identchars = cmd.IDENTCHARS = '.'
     needs_reconnect = False
     counter = 1
     inc_counter = count(2).next
 
-    builtins = {"EOF": "do_exit",
-                "exit": "do_exit",
-                "help": "do_help"}
+    builtins = {'EOF': 'do_exit',
+                'exit': 'do_exit',
+                'help': 'do_help'}
 
     amqp = {
-        "exchange.declare": Spec(("exchange", str),
-                                 ("type", str),
-                                 ("passive", bool, "no"),
-                                 ("durable", bool, "no"),
-                                 ("auto_delete", bool, "no"),
-                                 ("internal", bool, "no")),
-        "exchange.delete": Spec(("exchange", str),
-                                ("if_unused", bool)),
-        "queue.bind": Spec(("queue", str),
-                           ("exchange", str),
-                           ("routing_key", str)),
-        "queue.declare": Spec(("queue", str),
-                              ("passive", bool, "no"),
-                              ("durable", bool, "no"),
-                              ("exclusive", bool, "no"),
-                              ("auto_delete", bool, "no"),
+        'exchange.declare': Spec(('exchange', str),
+                                 ('type', str),
+                                 ('passive', bool, 'no'),
+                                 ('durable', bool, 'no'),
+                                 ('auto_delete', bool, 'no'),
+                                 ('internal', bool, 'no')),
+        'exchange.delete': Spec(('exchange', str),
+                                ('if_unused', bool)),
+        'queue.bind': Spec(('queue', str),
+                           ('exchange', str),
+                           ('routing_key', str)),
+        'queue.declare': Spec(('queue', str),
+                              ('passive', bool, 'no'),
+                              ('durable', bool, 'no'),
+                              ('exclusive', bool, 'no'),
+                              ('auto_delete', bool, 'no'),
                               returns=format_declare_queue),
-        "queue.delete": Spec(("queue", str),
-                             ("if_unused", bool, "no"),
-                             ("if_empty", bool, "no"),
-                             returns="ok. %d messages deleted."),
-        "queue.purge": Spec(("queue", str),
-                            returns="ok. %d messages deleted."),
-        "basic.get": Spec(("queue", str),
-                          ("no_ack", bool, "off"),
+        'queue.delete': Spec(('queue', str),
+                             ('if_unused', bool, 'no'),
+                             ('if_empty', bool, 'no'),
+                             returns='ok. %d messages deleted.'),
+        'queue.purge': Spec(('queue', str),
+                            returns='ok. %d messages deleted.'),
+        'basic.get': Spec(('queue', str),
+                          ('no_ack', bool, 'off'),
                           returns=dump_message),
-        "basic.publish": Spec(("msg", amqp.Message),
-                              ("exchange", str),
-                              ("routing_key", str),
-                              ("mandatory", bool, "no"),
-                              ("immediate", bool, "no")),
-        "basic.ack": Spec(("delivery_tag", int)),
+        'basic.publish': Spec(('msg', amqp.Message),
+                              ('exchange', str),
+                              ('routing_key', str),
+                              ('mandatory', bool, 'no'),
+                              ('immediate', bool, 'no')),
+        'basic.ack': Spec(('delivery_tag', int)),
     }
 
     def __init__(self, *args, **kwargs):
-        self.connect = kwargs.pop("connect")
-        self.silent = kwargs.pop("silent", False)
-        self.out = kwargs.pop("out", sys.stderr)
+        self.connect = kwargs.pop('connect')
+        self.silent = kwargs.pop('silent', False)
+        self.out = kwargs.pop('out', sys.stderr)
         cmd.Cmd.__init__(self, *args, **kwargs)
         self._reconnect()
 
@@ -214,7 +214,7 @@ class AMQShell(cmd.Cmd):
 
         Example:
 
-            >>> get_amqp_api_command("queue.delete", ["pobox", "yes", "no"])
+            >>> get_amqp_api_command('queue.delete', ['pobox', 'yes', 'no'])
             (<bound method Channel.queue_delete of
              <amqplib.client_0_8.channel.Channel object at 0x...>>,
              ('pobox', True, False))
@@ -222,19 +222,19 @@ class AMQShell(cmd.Cmd):
         """
         spec = self.amqp[cmd]
         args = spec.str_args_to_python(arglist)
-        attr_name = cmd.replace(".", "_")
+        attr_name = cmd.replace('.', '_')
         if self.needs_reconnect:
             self._reconnect()
         return getattr(self.chan, attr_name), args, spec.format_response
 
     def do_exit(self, *args):
-        """The `"exit"` command."""
+        """The `'exit'` command."""
         self.note("\n-> please, don't leave!")
         sys.exit(0)
 
     def display_command_help(self, cmd, short=False):
         spec = self.amqp[cmd]
-        self.say("%s %s" % (cmd, spec.format_signature()))
+        self.say('%s %s' % (cmd, spec.format_signature()))
 
     def do_help(self, *args):
         if not args:
@@ -255,11 +255,11 @@ class AMQShell(cmd.Cmd):
         """Return all commands starting with `text`, for tab-completion."""
         names = self.get_names()
         first = [cmd for cmd in names
-                        if cmd.startswith(text.replace("_", "."))]
+                        if cmd.startswith(text.replace('_', '.'))]
         if first:
             return first
         return [cmd for cmd in names
-                    if cmd.partition(".")[2].startswith(text)]
+                    if cmd.partition('.')[2].startswith(text)]
 
     def dispatch(self, cmd, argline):
         """Dispatch and execute the command.
@@ -281,14 +281,14 @@ class AMQShell(cmd.Cmd):
 
         E.g::
 
-            >>> parseline("queue.delete A 'B' C")
-            ("queue.delete", "A 'B' C", "queue.delete A 'B' C")
+            >>> parseline('queue.delete A "B" C')
+            ('queue.delete', 'A "B" C', 'queue.delete A "B" C')
 
         """
         parts = line.split()
         if parts:
-            return parts[0], " ".join(parts[1:]), line
-        return "", "", line
+            return parts[0], ' '.join(parts[1:]), line
+        return '', '', line
 
     def onecmd(self, line):
         """Parse line and execute command."""
@@ -334,28 +334,28 @@ class AMQPAdmin(object):
     Shell = AMQShell
 
     def __init__(self, *args, **kwargs):
-        self.app = app_or_default(kwargs.get("app"))
-        self.out = kwargs.setdefault("out", sys.stderr)
-        self.silent = kwargs.get("silent")
+        self.app = app_or_default(kwargs.get('app'))
+        self.out = kwargs.setdefault('out', sys.stderr)
+        self.silent = kwargs.get('silent')
         self.args = args
 
     def connect(self, conn=None):
         if conn:
             conn.close()
         conn = self.app.broker_connection()
-        self.note("-> connecting to %s." % conn.as_uri())
+        self.note('-> connecting to %s.' % conn.as_uri())
         conn.connect()
-        self.note("-> connected.")
+        self.note('-> connected.')
         return conn
 
     def run(self):
         shell = self.Shell(connect=self.connect, out=self.out)
         if self.args:
-            return shell.onecmd(" ".join(self.args))
+            return shell.onecmd(' '.join(self.args))
         try:
             return shell.cmdloop()
         except KeyboardInterrupt:
-            self.note("(bibi)")
+            self.note('(bibi)')
             pass
 
     def note(self, m):
@@ -366,7 +366,7 @@ class AMQPAdmin(object):
 class AMQPAdminCommand(Command):
 
     def run(self, *args, **options):
-        options["app"] = self.app
+        options['app'] = self.app
         return AMQPAdmin(*args, **options).run()
 
 
@@ -377,5 +377,5 @@ def camqadm(*args, **options):
 def main():
     AMQPAdminCommand().execute_from_commandline()
 
-if __name__ == "__main__":  # pragma: no cover
+if __name__ == '__main__':  # pragma: no cover
     main()

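For context: each `Spec` entry above pairs a positional argument name with a coercion type and, where shown, a default, and `str_args_to_python` (called from `get_amqp_api_command`) applies those coercions to the raw shell arguments. A minimal sketch of that behaviour, assuming the 'yes'/'no'-to-boolean handling the shell examples suggest; the names below are illustrative, not the real implementation:

    # Hedged sketch of Spec-style argument coercion (camqadm).
    def str_to_bool(term):
        # 'yes'-like strings become True, 'no'-like become False, as in
        # `queue.delete pobox yes no` -> ('pobox', True, False).
        if term.lower() in ('yes', 'true', '1'):
            return True
        if term.lower() in ('no', 'false', '0'):
            return False
        raise TypeError('%r cannot be converted to type bool' % (term, ))

    class SpecSketch(object):

        def __init__(self, *arglist):
            self.args = arglist  # e.g. (('queue', str), ('if_unused', bool, 'no'))

        def coerce(self, index, value):
            want_type = self.args[index][1]
            return str_to_bool(value) if want_type is bool else want_type(value)

        def str_args_to_python(self, arglist):
            return tuple(self.coerce(i, v) for i, v in enumerate(arglist))

    spec = SpecSketch(('queue', str), ('if_unused', bool, 'no'),
                      ('if_empty', bool, 'no'))
    assert spec.str_args_to_python(['pobox', 'yes', 'no']) == ('pobox', True, False)
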
+ 178 - 192
celery/bin/celery.py

@@ -37,18 +37,9 @@ Type '%(prog_name)s <command> --help' for help using a specific command.
 commands = {}
 
 command_classes = (
-    ("Main",
-        ["worker", "events", "beat", "shell", "amqp", "help"],
-        "green",
-    ),
-    ("Remote Control",
-        ["status", "inspect", "control"],
-        "blue",
-    ),
-    ("Utils",
-        ["purge", "list", "migrate", "apply", "result", "report"],
-        None,
-    ),
+    ('Main', ['worker', 'events', 'beat', 'shell', 'amqp', 'help'], 'green'),
+    ('Remote Control', ['status', 'inspect', 'control'], 'blue'),
+    ('Utils', ['purge', 'list', 'migrate', 'apply', 'result', 'report'], None),
 )
 
 
@@ -70,16 +61,16 @@ def command(fun, name=None, sortpri=0):
 
 
 class Command(BaseCommand):
-    help = ""
-    args = ""
+    help = ''
+    args = ''
     version = __version__
-    prog_name = "celery"
+    prog_name = 'celery'
     show_body = True
     leaf = True
 
     option_list = (
-        Option("--quiet", "-q", action="store_true"),
-        Option("--no-color", "-C", action="store_true"),
+        Option('--quiet', '-q', action='store_true'),
+        Option('--no-color', '-C', action='store_true'),
     )
 
     def __init__(self, app=None, no_color=False, stdout=sys.stdout,
@@ -94,13 +85,13 @@ class Command(BaseCommand):
         try:
             ret = self.run(*args, **kwargs)
         except Error, exc:
-            self.error(self.colored.red("Error: %s" % exc))
+            self.error(self.colored.red('Error: %s' % exc))
             return exc.status
 
         return ret if ret is not None else EX_OK
 
     def show_help(self, command):
-        self.run_from_argv(self.prog_name, [command, "--help"])
+        self.run_from_argv(self.prog_name, [command, '--help'])
         return EX_USAGE
 
     def error(self, s):
@@ -108,8 +99,8 @@ class Command(BaseCommand):
 
     def out(self, s, fh=None):
         s = str(s)
-        if not s.endswith("\n"):
-            s += "\n"
+        if not s.endswith('\n'):
+            s += '\n'
         (fh or self.stdout).write(s)
 
     def run_from_argv(self, prog_name, argv):
@@ -119,55 +110,55 @@ class Command(BaseCommand):
         self.parser = self.create_parser(self.prog_name, self.command)
         options, args = self.prepare_args(
                 *self.parser.parse_args(self.arglist))
-        self.colored = term.colored(enabled=not options["no_color"])
-        self.quiet = options.get("quiet", False)
-        self.show_body = options.get("show_body", True)
+        self.colored = term.colored(enabled=not options['no_color'])
+        self.quiet = options.get('quiet', False)
+        self.show_body = options.get('show_body', True)
         return self(*args, **options)
 
     def usage(self, command):
-        return "%%prog %s [options] %s" % (command, self.args)
+        return '%%prog %s [options] %s' % (command, self.args)
 
     def prettify_list(self, n):
         c = self.colored
         if not n:
-            return "- empty -"
-        return "\n".join(str(c.reset(c.white("*"), " %s" % (item, )))
+            return '- empty -'
+        return '\n'.join(str(c.reset(c.white('*'), ' %s' % (item, )))
                             for item in n)
 
     def prettify_dict_ok_error(self, n):
         c = self.colored
         try:
-            return (c.green("OK"),
-                    text.indent(self.prettify(n["ok"])[1], 4))
+            return (c.green('OK'),
+                    text.indent(self.prettify(n['ok'])[1], 4))
         except KeyError:
             pass
-        return (c.red("ERROR"),
-                text.indent(self.prettify(n["error"])[1], 4))
+        return (c.red('ERROR'),
+                text.indent(self.prettify(n['error'])[1], 4))
 
     def say_remote_command_reply(self, replies):
         c = self.colored
         node = replies.keys()[0]
         reply = replies[node]
         status, preply = self.prettify(reply)
-        self.say_chat("->", c.cyan(node, ": ") + status,
+        self.say_chat('->', c.cyan(node, ': ') + status,
                       text.indent(preply, 4))
 
     def prettify(self, n):
-        OK = str(self.colored.green("OK"))
+        OK = str(self.colored.green('OK'))
         if isinstance(n, list):
             return OK, self.prettify_list(n)
         if isinstance(n, dict):
-            if "ok" in n or "error" in n:
+            if 'ok' in n or 'error' in n:
                 return self.prettify_dict_ok_error(n)
         if isinstance(n, basestring):
             return OK, unicode(n)
         return OK, pformat(n)
 
-    def say_chat(self, direction, title, body=""):
+    def say_chat(self, direction, title, body=''):
         c = self.colored
-        if direction == "<-" and self.quiet:
+        if direction == '<-' and self.quiet:
             return
-        dirstr = not self.quiet and c.bold(c.white(direction), " ") or ""
+        dirstr = not self.quiet and c.bold(c.white(direction), ' ') or ''
         self.out(c.reset(dirstr, title))
         if body and self.show_body:
             self.out(body)
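
`prettify` above dispatches on the shape of a remote-control reply: lists are bulleted, dicts carrying an 'ok' or 'error' key get the OK/ERROR treatment, and anything else is pretty-printed. A condensed sketch of that dispatch, minus the terminal colours and with a plain pformat in place of the recursive call:

    from pprint import pformat

    def prettify_sketch(n):
        # Mirrors the dispatch order of Command.prettify, without colours.
        if isinstance(n, list):
            return 'OK', '\n'.join('* %s' % (item, ) for item in n)
        if isinstance(n, dict) and ('ok' in n or 'error' in n):
            key = 'ok' if 'ok' in n else 'error'
            return key.upper(), pformat(n[key])
        return 'OK', pformat(n)

    assert prettify_sketch(['a', 'b']) == ('OK', '* a\n* b')
    assert prettify_sketch({'ok': 'pong'}) == ('OK', "'pong'")
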
@@ -197,11 +188,6 @@ class Delegate(Command):
         return self.target.run(*args, **kwargs)
 
 
-def create_delegate(name, Command):
-    return command(type(name, (Delegate, ), {"Command": Command,
-                                             "__module__": __name__}))
-
-
 class worker(Delegate):
     """Start worker instance.
 
@@ -215,7 +201,7 @@ class worker(Delegate):
 
         celery worker --autoscale=10,0
     """
-    Command = "celery.bin.celeryd:WorkerCommand"
+    Command = 'celery.bin.celeryd:WorkerCommand'
 worker = command(worker, sortpri=01)
 
 
@@ -238,7 +224,7 @@ class events(Delegate):
         celery events -d
         celery events -C mod.attr -F 1.0 --detach --maxrate=100/m -l info
     """
-    Command = "celery.bin.celeryev:EvCommand"
+    Command = 'celery.bin.celeryev:EvCommand'
 events = command(events, sortpri=10)
 
 
@@ -252,7 +238,7 @@ class beat(Delegate):
         celery beat -S djcelery.schedulers.DatabaseScheduler
 
     """
-    Command = "celery.bin.celerybeat:BeatCommand"
+    Command = 'celery.bin.celerybeat:BeatCommand'
 beat = command(beat, sortpri=20)
 
 
@@ -273,7 +259,7 @@ class amqp(Delegate):
         celery amqp queue.delete queue yes yes
 
     """
-    Command = "celery.bin.camqadm:AMQPAdminCommand"
+    Command = 'celery.bin.camqadm:AMQPAdminCommand'
 amqp = command(amqp, sortpri=30)
 
 
@@ -286,33 +272,33 @@ class list_(Command):
 
     NOTE: For RabbitMQ the management plugin is required.
     """
-    args = "[bindings]"
+    args = '[bindings]'
 
     def list_bindings(self, management):
         try:
             bindings = management.get_bindings()
         except NotImplementedError:
-            raise Error("Your transport cannot list bindings.")
+            raise Error('Your transport cannot list bindings.')
 
-        fmt = lambda q, e, r: self.out("%s %s %s" % (q.ljust(28),
+        fmt = lambda q, e, r: self.out('%s %s %s' % (q.ljust(28),
                                                      e.ljust(28), r))
-        fmt("Queue", "Exchange", "Routing Key")
-        fmt("-" * 16, "-" * 16, "-" * 16)
+        fmt('Queue', 'Exchange', 'Routing Key')
+        fmt('-' * 16, '-' * 16, '-' * 16)
         for b in bindings:
-            fmt(b["destination"], b["source"], b["routing_key"])
+            fmt(b['destination'], b['source'], b['routing_key'])
 
     def run(self, what=None, *_, **kw):
-        topics = {"bindings": self.list_bindings}
+        topics = {'bindings': self.list_bindings}
         available = ', '.join(topics.keys())
         if not what:
-            raise Error("You must specify what to list (%s)" % available)
+            raise Error('You must specify what to list (%s)' % available)
         if what not in topics:
-            raise Error("unknown topic %r (choose one of: %s)" % (
+            raise Error('unknown topic %r (choose one of: %s)' % (
                             what, available))
         with self.app.broker_connection() as conn:
             self.app.amqp.TaskConsumer(conn).declare()
             topics[what](conn.manager)
-list_ = command(list_, "list")
+list_ = command(list_, 'list')
 
 
 class apply(Command):
@@ -323,33 +309,33 @@ class apply(Command):
         celery apply tasks.add --args='[2, 2]'
         celery apply tasks.add --args='[2, 2]' --countdown=10
     """
-    args = "<task_name>"
+    args = '<task_name>'
     option_list = Command.option_list + (
-            Option("--args", "-a", help="positional arguments (json)."),
-            Option("--kwargs", "-k", help="keyword arguments (json)."),
-            Option("--eta", help="scheduled time (ISO-8601)."),
-            Option("--countdown", type="float",
-                help="eta in seconds from now (float/int)."),
-            Option("--expires", help="expiry time (ISO-8601/float/int)."),
-            Option("--serializer", default="json", help="defaults to json."),
-            Option("--queue", help="custom queue name."),
-            Option("--exchange", help="custom exchange name."),
-            Option("--routing-key", help="custom routing key."),
+            Option('--args', '-a', help='positional arguments (json).'),
+            Option('--kwargs', '-k', help='keyword arguments (json).'),
+            Option('--eta', help='scheduled time (ISO-8601).'),
+            Option('--countdown', type='float',
+                help='eta in seconds from now (float/int).'),
+            Option('--expires', help='expiry time (ISO-8601/float/int).'),
+            Option('--serializer', default='json', help='defaults to json.'),
+            Option('--queue', help='custom queue name.'),
+            Option('--exchange', help='custom exchange name.'),
+            Option('--routing-key', help='custom routing key.'),
     )
 
     def run(self, name, *_, **kw):
         # Positional args.
-        args = kw.get("args") or ()
+        args = kw.get('args') or ()
         if isinstance(args, basestring):
             args = anyjson.loads(args)
 
         # Keyword args.
-        kwargs = kw.get("kwargs") or {}
+        kwargs = kw.get('kwargs') or {}
         if isinstance(kwargs, basestring):
             kwargs = anyjson.loads(kwargs)
 
         # Expires can be int/float.
-        expires = kw.get("expires") or None
+        expires = kw.get('expires') or None
         try:
             expires = float(expires)
         except (TypeError, ValueError):
@@ -360,12 +346,12 @@ class apply(Command):
                 raise
 
         res = self.app.send_task(name, args=args, kwargs=kwargs,
-                                 countdown=kw.get("countdown"),
-                                 serializer=kw.get("serializer"),
-                                 queue=kw.get("queue"),
-                                 exchange=kw.get("exchange"),
-                                 routing_key=kw.get("routing_key"),
-                                 eta=maybe_iso8601(kw.get("eta")),
+                                 countdown=kw.get('countdown'),
+                                 serializer=kw.get('serializer'),
+                                 queue=kw.get('queue'),
+                                 exchange=kw.get('exchange'),
+                                 routing_key=kw.get('routing_key'),
+                                 eta=maybe_iso8601(kw.get('eta')),
                                  expires=expires)
         self.out(res.id)
 apply = command(apply)
@@ -381,12 +367,12 @@ class purge(Command):
         queues = len(self.app.amqp.queues.keys())
         messages_removed = self.app.control.purge()
         if messages_removed:
-            self.out("Purged %s %s from %s known task %s." % (
-                messages_removed, text.pluralize(messages_removed, "message"),
-                queues, text.pluralize(queues, "queue")))
+            self.out('Purged %s %s from %s known task %s.' % (
+                messages_removed, text.pluralize(messages_removed, 'message'),
+                queues, text.pluralize(queues, 'queue')))
         else:
-            self.out("No messages purged from %s known %s" % (
-                queues, text.pluralize(queues, "queue")))
+            self.out('No messages purged from %s known %s' % (
+                queues, text.pluralize(queues, 'queue')))
 purge = command(purge)
 
 
@@ -400,17 +386,17 @@ class result(Command):
         celery result 8f511516-e2f5-4da4-9d2f-0fb83a86e500 --traceback
 
     """
-    args = "<task_id>"
+    args = '<task_id>'
     option_list = Command.option_list + (
-            Option("--task", "-t", help="name of task (if custom backend)"),
-            Option("--traceback", action="store_true",
-                   help="show traceback instead"),
+            Option('--task', '-t', help='name of task (if custom backend)'),
+            Option('--traceback', action='store_true',
+                   help='show traceback instead'),
     )
 
     def run(self, task_id, *args, **kwargs):
         result_cls = self.app.AsyncResult
-        task = kwargs.get("task")
-        traceback = kwargs.get("traceback", False)
+        task = kwargs.get('task')
+        traceback = kwargs.get('traceback', False)
 
         if task:
             result_cls = self.app.tasks[task].AsyncResult
@@ -428,13 +414,13 @@ class _RemoteControl(Command):
     choices = None
     leaf = False
     option_list = Command.option_list + (
-                Option("--timeout", "-t", type="float",
-                    help="Timeout in seconds (float) waiting for reply"),
-                Option("--destination", "-d",
-                    help="Comma separated list of destination node names."))
+                Option('--timeout', '-t', type='float',
+                    help='Timeout in seconds (float) waiting for reply'),
+                Option('--destination', '-d',
+                    help='Comma separated list of destination node names.'))
 
     @classmethod
-    def get_command_info(self, command, indent=0, prefix="", color=None,
+    def get_command_info(self, command, indent=0, prefix='', color=None,
             help=False):
         if help:
             help = '|' + text.indent(self.choices[command][1], indent + 4)
@@ -444,52 +430,52 @@ class _RemoteControl(Command):
             # see if it uses args.
             meth = getattr(self, command)
             return text.join([
-                '|' + text.indent("%s%s %s" % (prefix, color(command),
+                '|' + text.indent('%s%s %s' % (prefix, color(command),
                                                meth.__doc__), indent), help,
             ])
 
         except AttributeError:
             return text.join([
-                "|" + text.indent(prefix + str(color(command)), indent), help,
+                '|' + text.indent(prefix + str(color(command)), indent), help,
             ])
 
     @classmethod
-    def list_commands(self, indent=0, prefix="", color=None, help=False):
+    def list_commands(self, indent=0, prefix='', color=None, help=False):
         color = color if color else lambda x: x
-        prefix = prefix + " " if prefix else ""
-        return "\n".join(self.get_command_info(c, indent, prefix, color, help)
+        prefix = prefix + ' ' if prefix else ''
+        return '\n'.join(self.get_command_info(c, indent, prefix, color, help)
                             for c in sorted(self.choices))
 
     @property
     def epilog(self):
-        return "\n".join([
-            "[Commands]",
+        return '\n'.join([
+            '[Commands]',
             self.list_commands(indent=4, help=True)
         ])
 
     def usage(self, command):
-        return "%%prog %s [options] %s <command> [arg1 .. argN]" % (
+        return '%%prog %s [options] %s <command> [arg1 .. argN]' % (
                 command, self.args)
 
     def call(self, *args, **kwargs):
-        raise NotImplementedError("get_obj")
+        raise NotImplementedError('get_obj')
 
     def run(self, *args, **kwargs):
         if not args:
-            raise Error("Missing %s method. See --help" % self.name)
+            raise Error('Missing %s method. See --help' % self.name)
         return self.do_call_method(args, **kwargs)
 
     def do_call_method(self, args, **kwargs):
         method = args[0]
-        if method == "help":
-            raise Error("Did you mean '%s --help'?" % self.name)
+        if method == 'help':
+            raise Error("Did you mean '%s --help'?' % self.name)
         if method not in self.choices:
-            raise Error("Unknown %s method %s" % (self.name, method))
+            raise Error('Unknown %s method %s' % (self.name, method))
 
-        destination = kwargs.get("destination")
-        timeout = kwargs.get("timeout") or self.choices[method][0]
+        destination = kwargs.get('destination')
+        timeout = kwargs.get('timeout') or self.choices[method][0]
         if destination and isinstance(destination, basestring):
-            destination = map(str.strip, destination.split(","))
+            destination = map(str.strip, destination.split(','))
 
         try:
             handler = getattr(self, method)
@@ -500,15 +486,15 @@ class _RemoteControl(Command):
                           timeout=timeout, destination=destination,
                           callback=self.say_remote_command_reply)
         if not replies:
-            raise Error("No nodes replied within time constraint.",
+            raise Error('No nodes replied within time constraint.',
                         status=EX_UNAVAILABLE)
         return replies
 
-    def say(self, direction, title, body=""):
+    def say(self, direction, title, body=''):
         c = self.colored
-        if direction == "<-" and self.quiet:
+        if direction == '<-' and self.quiet:
             return
-        dirstr = not self.quiet and c.bold(c.white(direction), " ") or ""
+        dirstr = not self.quiet and c.bold(c.white(direction), ' ') or ''
         self.out(c.reset(dirstr, title))
         if body and self.show_body:
             self.out(body)
@@ -526,17 +512,17 @@ class inspect(_RemoteControl):
         celery inspect revoked -d w1.e.com,w2.e.com
 
     """
-    name = "inspect"
+    name = 'inspect'
     choices = {
-        "active": (1.0, "dump active tasks (being processed)"),
-        "active_queues": (1.0, "dump queues being consumed from"),
-        "scheduled": (1.0, "dump scheduled tasks (eta/countdown/retry)"),
-        "reserved": (1.0, "dump reserved tasks (waiting to be processed)"),
-        "stats": (1.0, "dump worker statistics"),
-        "revoked": (1.0, "dump of revoked task ids"),
-        "registered": (1.0, "dump of registered tasks"),
-        "ping": (0.2, "ping worker(s)"),
-        "report": (1.0, "get bugreport info")
+        'active': (1.0, 'dump active tasks (being processed)'),
+        'active_queues': (1.0, 'dump queues being consumed from'),
+        'scheduled': (1.0, 'dump scheduled tasks (eta/countdown/retry)'),
+        'reserved': (1.0, 'dump reserved tasks (waiting to be processed)'),
+        'stats': (1.0, 'dump worker statistics'),
+        'revoked': (1.0, 'dump of revoked task ids'),
+        'registered': (1.0, 'dump of registered tasks'),
+        'ping': (0.2, 'ping worker(s)'),
+        'report': (1.0, 'get bugreport info')
     }
 
     def call(self, method, *args, **options):
@@ -562,19 +548,19 @@ class control(_RemoteControl):
         celery control -d w1.e.com add_consumer queue exchange direct rkey
 
     """
-    name = "control"
+    name = 'control'
     choices = {
-        "enable_events": (1.0, "tell worker(s) to enable events"),
-        "disable_events": (1.0, "tell worker(s) to disable events"),
-        "add_consumer": (1.0, "tell worker(s) to start consuming a queue"),
-        "cancel_consumer": (1.0, "tell worker(s) to stop consuming a queue"),
-        "rate_limit": (1.0,
-            "tell worker(s) to modify the rate limit for a task type"),
-        "time_limit": (1.0,
-            "tell worker(s) to modify the time limit for a task type."),
-        "autoscale": (1.0, "change autoscale settings"),
-        "pool_grow": (1.0, "start more pool processes"),
-        "pool_shrink": (1.0, "use less pool processes"),
+        'enable_events': (1.0, 'tell worker(s) to enable events'),
+        'disable_events': (1.0, 'tell worker(s) to disable events'),
+        'add_consumer': (1.0, 'tell worker(s) to start consuming a queue'),
+        'cancel_consumer': (1.0, 'tell worker(s) to stop consuming a queue'),
+        'rate_limit': (1.0,
+            'tell worker(s) to modify the rate limit for a task type'),
+        'time_limit': (1.0,
+            'tell worker(s) to modify the time limit for a task type.'),
+        'autoscale': (1.0, 'change autoscale settings'),
+        'pool_grow': (1.0, 'start more pool processes'),
+        'pool_shrink': (1.0, 'use less pool processes'),
     }
 
     def call(self, method, *args, **options):
@@ -601,7 +587,7 @@ class control(_RemoteControl):
         return self.call(method, task_name, soft, hard, **kwargs)
 
     def add_consumer(self, method, queue, exchange=None,
-            exchange_type="direct", routing_key=None, **kwargs):
+            exchange_type='direct', routing_key=None, **kwargs):
         """<queue> [exchange [type [routing_key]]]"""
         return self.call(method, queue, exchange,
                          exchange_type, routing_key, **kwargs)
@@ -618,16 +604,16 @@ class status(Command):
 
     def run(self, *args, **kwargs):
         replies = inspect(app=self.app,
-                          no_color=kwargs.get("no_color", False),
+                          no_color=kwargs.get('no_color', False),
                           stdout=self.stdout, stderr=self.stderr) \
-                    .run("ping", **dict(kwargs, quiet=True, show_body=False))
+                    .run('ping', **dict(kwargs, quiet=True, show_body=False))
         if not replies:
-            raise Error("No nodes replied within time constraint",
+            raise Error('No nodes replied within time constraint',
                         status=EX_UNAVAILABLE)
         nodecount = len(replies)
-        if not kwargs.get("quiet", False):
-            self.out("\n%s %s online." % (nodecount,
-                                          text.pluralize(nodecount, "node")))
+        if not kwargs.get('quiet', False):
+            self.out('\n%s %s online.' % (nodecount,
+                                          text.pluralize(nodecount, 'node')))
 status = command(status)
 
 
@@ -643,15 +629,15 @@ class migrate(Command):
           a backup of the tasks before you continue.
     """
     def usage(self, command):
-        return "%%prog %s <source_url> <dest_url>" % (command, )
+        return '%%prog %s <source_url> <dest_url>' % (command, )
 
     def on_migrate_task(self, state, body, message):
-        self.out("Migrating task %s/%s: %s[%s]" % (
-            state.count, state.strtotal, body["task"], body["id"]))
+        self.out('Migrating task %s/%s: %s[%s]' % (
+            state.count, state.strtotal, body['task'], body['id']))
 
     def run(self, *args, **kwargs):
         if len(args) != 2:
-            return self.show_help("migrate")
+            return self.show_help('migrate')
         from kombu import BrokerConnection
         from celery.contrib.migrate import migrate_tasks
 
@@ -683,46 +669,46 @@ class shell(Command):  # pragma: no cover
         <AsyncResult: 537b48c7-d6d3-427a-a24a-d1b4414035be>
     """
     option_list = Command.option_list + (
-                Option("--ipython", "-I",
-                    action="store_true", dest="force_ipython",
-                    help="force iPython."),
-                Option("--bpython", "-B",
-                    action="store_true", dest="force_bpython",
-                    help="force bpython."),
-                Option("--python", "-P",
-                    action="store_true", dest="force_python",
-                    help="force default Python shell."),
-                Option("--without-tasks", "-T", action="store_true",
+                Option('--ipython', '-I',
+                    action='store_true', dest='force_ipython',
+                    help='force iPython.'),
+                Option('--bpython', '-B',
+                    action='store_true', dest='force_bpython',
+                    help='force bpython.'),
+                Option('--python', '-P',
+                    action='store_true', dest='force_python',
+                    help='force default Python shell.'),
+                Option('--without-tasks', '-T', action='store_true',
                     help="don't add tasks to locals."),
-                Option("--eventlet", action="store_true",
-                    help="use eventlet."),
-                Option("--gevent", action="store_true", help="use gevent."),
+                Option('--eventlet', action='store_true',
+                    help='use eventlet.'),
+                Option('--gevent', action='store_true', help='use gevent.'),
     )
 
     def run(self, force_ipython=False, force_bpython=False,
             force_python=False, without_tasks=False, eventlet=False,
             gevent=False, **kwargs):
         if eventlet:
-            import_module("celery.concurrency.eventlet")
+            import_module('celery.concurrency.eventlet')
         if gevent:
-            import_module("celery.concurrency.gevent")
+            import_module('celery.concurrency.gevent')
         import celery
         import celery.task.base
         self.app.loader.import_default_modules()
-        self.locals = {"celery": self.app,
-                       "Task": celery.Task,
-                       "chord": celery.chord,
-                       "group": celery.group,
-                       "chain": celery.chain,
-                       "chunks": celery.chunks,
-                       "xmap": celery.xmap,
-                       "xstarmap": celery.xstarmap,
-                       "subtask": celery.subtask}
+        self.locals = {'celery': self.app,
+                       'Task': celery.Task,
+                       'chord': celery.chord,
+                       'group': celery.group,
+                       'chain': celery.chain,
+                       'chunks': celery.chunks,
+                       'xmap': celery.xmap,
+                       'xstarmap': celery.xstarmap,
+                       'subtask': celery.subtask}
 
         if not without_tasks:
             self.locals.update(dict((task.__name__, task)
                                 for task in self.app.tasks.itervalues()
-                                    if not task.name.startswith("celery.")))
+                                    if not task.name.startswith('celery.')))
 
         if force_python:
             return self.invoke_fallback_shell()
@@ -755,7 +741,7 @@ class shell(Command):  # pragma: no cover
             import rlcompleter
             readline.set_completer(
                     rlcompleter.Completer(self.locals).complete)
-            readline.parse_and_bind("tab:complete")
+            readline.parse_and_bind('tab:complete')
         code.interact(local=self.locals)
 
     def invoke_ipython_shell(self):
@@ -777,12 +763,12 @@ class help(Command):
     """Show help screen and exit."""
 
     def usage(self, command):
-        return "%%prog <command> [options] %s" % (self.args, )
+        return '%%prog <command> [options] %s' % (self.args, )
 
     def run(self, *args, **kwargs):
         self.parser.print_help()
-        self.out(HELP % {"prog_name": self.prog_name,
-                         "commands": CeleryCommand.list_commands()})
+        self.out(HELP % {'prog_name': self.prog_name,
+                         'commands': CeleryCommand.list_commands()})
 
         return EX_USAGE
 help = command(help)
@@ -800,26 +786,26 @@ report = command(report)
 class CeleryCommand(BaseCommand):
     commands = commands
     enable_config_from_cmdline = True
-    prog_name = "celery"
+    prog_name = 'celery'
 
     def execute(self, command, argv=None):
         try:
             cls = self.commands[command]
         except KeyError:
-            cls, argv = self.commands["help"], ["help"]
-        cls = self.commands.get(command) or self.commands["help"]
+            cls, argv = self.commands['help'], ['help']
+        cls = self.commands.get(command) or self.commands['help']
         try:
             return cls(app=self.app).run_from_argv(self.prog_name, argv)
         except Error:
-            return self.execute("help", argv)
+            return self.execute('help', argv)
 
     def remove_options_at_beginning(self, argv, index=0):
         if argv:
             while index < len(argv):
                 value = argv[index]
-                if value.startswith("--"):
+                if value.startswith('--'):
                     pass
-                elif value.startswith("-"):
+                elif value.startswith('-'):
                     index += 1
                 else:
                     return argv[index:]
@@ -833,7 +819,7 @@ class CeleryCommand(BaseCommand):
         try:
             command = argv[0]
         except IndexError:
-            command, argv = "help", ["help"]
+            command, argv = 'help', ['help']
         return self.execute(command, argv)
 
     def execute_from_commandline(self, argv=None):
@@ -848,11 +834,11 @@ class CeleryCommand(BaseCommand):
         colored = term.colored().names[color] if color else lambda x: x
         obj = self.commands[command]
         if obj.leaf:
-            return '|' + text.indent("celery %s" % colored(command), indent)
+            return '|' + text.indent('celery %s' % colored(command), indent)
         return text.join([
-            " ",
-            '|' + text.indent("celery %s --help" % colored(command), indent),
-            obj.list_commands(indent, "celery %s" % command, colored),
+            ' ',
+            '|' + text.indent('celery %s --help' % colored(command), indent),
+            obj.list_commands(indent, 'celery %s' % command, colored),
         ])
 
     @classmethod
@@ -861,12 +847,12 @@ class CeleryCommand(BaseCommand):
         ret = []
         for cls, commands, color in command_classes:
             ret.extend([
-                text.indent("+ %s: " % white(cls), indent),
-                "\n".join(self.get_command_info(command, indent + 4, color)
+                text.indent('+ %s: ' % white(cls), indent),
+                '\n'.join(self.get_command_info(command, indent + 4, color)
                             for command in commands),
-                ""
+                ''
             ])
-        return "\n".join(ret).strip()
+        return '\n'.join(ret).strip()
 
 
 def determine_exit_status(ret):
@@ -879,10 +865,10 @@ def main():
     # Fix for setuptools generated scripts, so that it will
     # work with multiprocessing fork emulation.
     # (see multiprocessing.forking.get_preparation_data())
-    if __name__ != "__main__":  # pragma: no cover
-        sys.modules["__main__"] = sys.modules[__name__]
+    if __name__ != '__main__':  # pragma: no cover
+        sys.modules['__main__'] = sys.modules[__name__]
     freeze_support()
     CeleryCommand().execute_from_commandline()
 
-if __name__ == "__main__":          # pragma: no cover
+if __name__ == '__main__':          # pragma: no cover
     main()

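Throughout this file the `command()` helper registers each class in the module-level `commands` dict keyed by name (optionally overridden, as in `command(list_, 'list')`), and `CeleryCommand.execute` resolves names from that dict, falling back to 'help'. A minimal sketch of the registry pattern, with `registry` and `register` standing in for the real names:

    # Sketch of the name -> command-class registry used by celery.bin.celery.
    registry = {}

    def register(cls, name=None, sortpri=0):
        registry[name or cls.__name__] = cls
        cls.sortpri = sortpri  # assumption: used when ordering the help screen
        return cls

    class list_(object):
        """Get info from broker."""

    list_ = register(list_, 'list')   # explicit name, as in the diff above
    assert registry['list'] is list_
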
+ 8 - 8
celery/bin/celerybeat.py

@@ -16,7 +16,7 @@ The :program:`celery beat` command.
 .. cmdoption:: -s, --schedule
 
     Path to the schedule database. Defaults to `celerybeat-schedule`.
-    The extension ".db" may be appended to the filename.
+    The extension '.db' may be appended to the filename.
     Default is %(default)s.
 
 .. cmdoption:: -S, --scheduler
@@ -58,7 +58,7 @@ class BeatCommand(Command):
     def run(self, detach=False, logfile=None, pidfile=None, uid=None,
             gid=None, umask=None, working_directory=None, **kwargs):
         workdir = working_directory
-        kwargs.pop("app", None)
+        kwargs.pop('app', None)
         beat = partial(self.app.Beat,
                        logfile=logfile, pidfile=pidfile, **kwargs)
 
@@ -69,7 +69,7 @@ class BeatCommand(Command):
             return beat().run()
 
     def prepare_preload_options(self, options):
-        workdir = options.get("working_directory")
+        workdir = options.get('working_directory')
         if workdir:
             os.chdir(workdir)
 
@@ -77,17 +77,17 @@ class BeatCommand(Command):
         c = self.app.conf
 
         return (
-            Option('--detach', action="store_true"),
+            Option('--detach', action='store_true'),
             Option('-s', '--schedule', default=c.CELERYBEAT_SCHEDULE_FILENAME),
-            Option('--max-interval', type="float"),
-            Option('-S', '--scheduler', dest="scheduler_cls"),
+            Option('--max-interval', type='float'),
+            Option('-S', '--scheduler', dest='scheduler_cls'),
             Option('-l', '--loglevel', default=c.CELERYBEAT_LOG_LEVEL),
-        ) + daemon_options(default_pidfile="celerybeat.pid")
+        ) + daemon_options(default_pidfile='celerybeat.pid')
 
 
 def main():
     beat = BeatCommand()
     beat.execute_from_commandline()
 
-if __name__ == "__main__":      # pragma: no cover
+if __name__ == '__main__':      # pragma: no cover
     main()

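`BeatCommand.run` above builds the service with `functools.partial` so a single constructor call serves both branches: run inside the `detached(...)` context manager when --detach is given, or run directly in the foreground otherwise. A rough sketch of that shape, using a stand-in class since the real `app.Beat` takes many more options:

    from functools import partial

    class BeatStandIn(object):
        # Stand-in for app.Beat; the attributes here are assumptions.
        def __init__(self, logfile=None, pidfile=None):
            self.logfile, self.pidfile = logfile, pidfile

        def run(self):
            return 'beat: logfile=%r pidfile=%r' % (self.logfile, self.pidfile)

    beat = partial(BeatStandIn, logfile='beat.log', pidfile='celerybeat.pid')

    detach = False  # with detach=True, beat() would be called inside detached(...)
    if not detach:
        print(beat().run())
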
+ 1 - 1
celery/bin/celeryctl.py

@@ -12,5 +12,5 @@ from celery.bin.celery import (  # noqa
     CeleryCommand as celeryctl, Command, main,
 )
 
-if __name__ == "__main__":  # pragma: no cover
+if __name__ == '__main__':  # pragma: no cover
     main()

+ 30 - 30
celery/bin/celeryd.py

@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 """
 
-The :program:`celery worker` command (previously known as ``celeryd``).
+The :program:`celery worker` command (previously known as ``celeryd``)
 
 .. program:: celery worker
 
@@ -126,7 +126,7 @@ from celery.utils.log import LOG_LEVELS, mlevel
 
 class WorkerCommand(Command):
     doc = __doc__  # parse help from this.
-    namespace = "celeryd"
+    namespace = 'celeryd'
     enable_config_from_cmdline = True
     supports_args = False
 
@@ -143,21 +143,21 @@ class WorkerCommand(Command):
         return super(WorkerCommand, self).execute_from_commandline(argv)
 
     def run(self, *args, **kwargs):
-        kwargs.pop("app", None)
+        kwargs.pop('app', None)
         # Pools like eventlet/gevent needs to patch libs as early
         # as possible.
-        kwargs["pool_cls"] = concurrency.get_implementation(
-                    kwargs.get("pool_cls") or self.app.conf.CELERYD_POOL)
-        if self.app.IS_WINDOWS and kwargs.get("beat"):
-            self.die("-B option does not work on Windows.  "
-                     "Please run celerybeat as a separate service.")
-        loglevel = kwargs.get("loglevel")
+        kwargs['pool_cls'] = concurrency.get_implementation(
+                    kwargs.get('pool_cls') or self.app.conf.CELERYD_POOL)
+        if self.app.IS_WINDOWS and kwargs.get('beat'):
+            self.die('-B option does not work on Windows.  '
+                     'Please run celerybeat as a separate service.')
+        loglevel = kwargs.get('loglevel')
         if loglevel:
             try:
-                kwargs["loglevel"] = mlevel(loglevel)
+                kwargs['loglevel'] = mlevel(loglevel)
             except KeyError:  # pragma: no cover
-                self.die("Unknown level %r. Please use one of %s." % (
-                    loglevel, "|".join(l for l in LOG_LEVELS.keys()
+                self.die('Unknown level %r. Please use one of %s.' % (
+                    loglevel, '|'.join(l for l in LOG_LEVELS.keys()
                       if isinstance(l, basestring))))
         return self.app.Worker(**kwargs).run()
 
@@ -165,32 +165,32 @@ class WorkerCommand(Command):
         conf = self.app.conf
         return (
             Option('-c', '--concurrency',
-                default=conf.CELERYD_CONCURRENCY, type="int"),
-            Option('-P', '--pool', default=conf.CELERYD_POOL, dest="pool_cls"),
-            Option('--purge', '--discard', default=False, action="store_true"),
+                default=conf.CELERYD_CONCURRENCY, type='int'),
+            Option('-P', '--pool', default=conf.CELERYD_POOL, dest='pool_cls'),
+            Option('--purge', '--discard', default=False, action='store_true'),
             Option('-f', '--logfile', default=conf.CELERYD_LOG_FILE),
             Option('-l', '--loglevel', default=conf.CELERYD_LOG_LEVEL),
             Option('-n', '--hostname'),
-            Option('-B', '--beat', action="store_true"),
-            Option('-s', '--schedule', dest="schedule_filename",
+            Option('-B', '--beat', action='store_true'),
+            Option('-s', '--schedule', dest='schedule_filename',
                 default=conf.CELERYBEAT_SCHEDULE_FILENAME),
-            Option('--scheduler', dest="scheduler_cls"),
+            Option('--scheduler', dest='scheduler_cls'),
             Option('-S', '--statedb',
-                default=conf.CELERYD_STATE_DB, dest="state_db"),
+                default=conf.CELERYD_STATE_DB, dest='state_db'),
             Option('-E', '--events', default=conf.CELERY_SEND_EVENTS,
-                action="store_true", dest="send_events"),
-            Option('--time-limit', type="int", dest="task_time_limit",
+                action='store_true', dest='send_events'),
+            Option('--time-limit', type='int', dest='task_time_limit',
                 default=conf.CELERYD_TASK_TIME_LIMIT),
-            Option('--soft-time-limit', dest="task_soft_time_limit",
-                default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type="int"),
-            Option('--maxtasksperchild', dest="max_tasks_per_child",
-                default=conf.CELERYD_MAX_TASKS_PER_CHILD, type="int"),
+            Option('--soft-time-limit', dest='task_soft_time_limit',
+                default=conf.CELERYD_TASK_SOFT_TIME_LIMIT, type='int'),
+            Option('--maxtasksperchild', dest='max_tasks_per_child',
+                default=conf.CELERYD_MAX_TASKS_PER_CHILD, type='int'),
             Option('--queues', '-Q', default=[]),
             Option('--include', '-I', default=[]),
             Option('--pidfile'),
             Option('--autoscale'),
-            Option('--autoreload', action="store_true"),
-            Option("--no-execv", action="store_true", default=False),
+            Option('--autoreload', action='store_true'),
+            Option('--no-execv', action='store_true', default=False),
         )
 
 
@@ -198,12 +198,12 @@ def main():
     # Fix for setuptools generated scripts, so that it will
     # work with multiprocessing fork emulation.
     # (see multiprocessing.forking.get_preparation_data())
-    if __name__ != "__main__":  # pragma: no cover
-        sys.modules["__main__"] = sys.modules[__name__]
+    if __name__ != '__main__':  # pragma: no cover
+        sys.modules['__main__'] = sys.modules[__name__]
     freeze_support()
     worker = WorkerCommand()
     worker.execute_from_commandline()
 
 
-if __name__ == "__main__":          # pragma: no cover
+if __name__ == '__main__':          # pragma: no cover
     main()

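The `mlevel` call above normalizes --loglevel: numbers pass through, names are looked up, and an unknown name raises KeyError, which is why the error message joins only the string keys of LOG_LEVELS (the mapping evidently also carries reverse int -> name entries). A sketch of that normalization built on the stdlib logging table; the real mapping in celery.utils.log may differ:

    import logging

    # Assumed shape of LOG_LEVELS: name -> int, plus reverse int -> name.
    LOG_LEVELS = dict((name, getattr(logging, name)) for name in
                      ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'))
    LOG_LEVELS.update(dict((v, k) for k, v in LOG_LEVELS.items()))

    def mlevel_sketch(level):
        # Numbers pass through; names are upper-cased and looked up,
        # raising KeyError for unknown names like the real helper does.
        if isinstance(level, int):
            return level
        return LOG_LEVELS[level.upper()]

    assert mlevel_sketch('info') == logging.INFO
    assert mlevel_sketch(10) == mlevel_sketch('DEBUG')
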
+ 19 - 19
celery/bin/celeryd_detach.py

@@ -26,9 +26,9 @@ from celery.bin.base import daemon_options, Option
 
 logger = get_logger(__name__)
 
-OPTION_LIST = daemon_options(default_pidfile="celeryd.pid") + (
-                Option("--fake",
-                       default=False, action="store_true", dest="fake",
+OPTION_LIST = daemon_options(default_pidfile='celeryd.pid') + (
+                Option('--fake',
+                       default=False, action='store_true', dest='fake',
                        help="Don't fork (for debugging purposes)"), )
 
 
@@ -39,8 +39,8 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
             os.execv(path, [path] + argv)
         except Exception:
             from celery import current_app
-            current_app.log.setup_logging_subsystem("ERROR", logfile)
-            logger.critical("Can't exec %r", " ".join([path] + argv),
+            current_app.log.setup_logging_subsystem('ERROR', logfile)
+            logger.critical("Can't exec %r", ' '.join([path] + argv),
                             exc_info=True)
         return EX_FAILURE
 
@@ -54,8 +54,8 @@ class PartialOptionParser(OptionParser):
     def _process_long_opt(self, rargs, values):
         arg = rargs.pop(0)
 
-        if "=" in arg:
-            opt, next_arg = arg.split("=", 1)
+        if '=' in arg:
+            opt, next_arg = arg.split('=', 1)
             rargs.insert(0, next_arg)
             had_explicit_value = True
         else:
@@ -73,9 +73,9 @@ class PartialOptionParser(OptionParser):
                 nargs = option.nargs
                 if len(rargs) < nargs:
                     if nargs == 1:
-                        self.error("%s option requires an argument" % opt)
+                        self.error('%s option requires an argument' % opt)
                     else:
-                        self.error("%s option requires %d arguments" % (
+                        self.error('%s option requires %d arguments' % (
                                     opt, nargs))
                 elif nargs == 1:
                     value = rargs.pop(0)
@@ -84,7 +84,7 @@ class PartialOptionParser(OptionParser):
                     del rargs[0:nargs]
 
             elif had_explicit_value:
-                self.error("%s option does not take a value" % opt)
+                self.error('%s option does not take a value' % opt)
             else:
                 value = None
             option.process(opt, value, values, self)
@@ -97,19 +97,19 @@ class PartialOptionParser(OptionParser):
             OptionParser._process_short_opts(self, rargs, values)
         except BadOptionError:
             self.leftovers.append(arg)
-            if rargs and not rargs[0][0] == "-":
+            if rargs and not rargs[0][0] == '-':
                 self.leftovers.append(rargs.pop(0))
 
 
 class detached_celeryd(object):
     option_list = OPTION_LIST
-    usage = "%prog [options] [celeryd options]"
+    usage = '%prog [options] [celeryd options]'
     version = __version__
-    description = ("Detaches Celery worker nodes.  See `celeryd --help` "
-                   "for the list of supported worker arguments.")
+    description = ('Detaches Celery worker nodes.  See `celeryd --help` '
+                   'for the list of supported worker arguments.')
     command = sys.executable
     execv_path = sys.executable
-    execv_argv = ["-m", "celery.bin.celeryd"]
+    execv_argv = ['-m', 'celery.bin.celeryd']
 
     def Parser(self, prog_name):
         return PartialOptionParser(prog=prog_name,
@@ -122,9 +122,9 @@ class detached_celeryd(object):
         parser = self.Parser(prog_name)
         options, values = parser.parse_args(argv)
         if options.logfile:
-            parser.leftovers.append("--logfile=%s" % (options.logfile, ))
+            parser.leftovers.append('--logfile=%s' % (options.logfile, ))
         if options.pidfile:
-            parser.leftovers.append("--pidfile=%s" % (options.pidfile, ))
+            parser.leftovers.append('--pidfile=%s' % (options.pidfile, ))
         return options, values, parser.leftovers
 
     def execute_from_commandline(self, argv=None):
@@ -136,7 +136,7 @@ class detached_celeryd(object):
             if seen_cargs:
                 config.append(arg)
             else:
-                if arg == "--":
+                if arg == '--':
                     seen_cargs = 1
                     config.append(arg)
         prog_name = os.path.basename(argv[0])
@@ -149,5 +149,5 @@ class detached_celeryd(object):
 def main():
     detached_celeryd().execute_from_commandline()
 
-if __name__ == "__main__":  # pragma: no cover
+if __name__ == '__main__':  # pragma: no cover
     main()

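The `_process_long_opt` override above splits `--opt=value` tokens before matching, and anything optparse rejects is collected in `self.leftovers` instead of aborting; that is how unknown worker options pass through to celeryd untouched. A toy illustration of the splitting step alone, independent of optparse:

    def split_long_opt(arg):
        # '--logfile=x.log' -> ('--logfile', 'x.log', True); the boolean
        # mirrors had_explicit_value in the parser above.
        if '=' in arg:
            opt, value = arg.split('=', 1)
            return opt, value, True
        return arg, None, False

    assert split_long_opt('--logfile=x.log') == ('--logfile', 'x.log', True)
    assert split_long_opt('--fake') == ('--fake', None, False)
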
+ 91 - 91
celery/bin/celeryd_multi.py

@@ -52,7 +52,7 @@ Examples
         -Q default -L:4,5 DEBUG
 
     # You can show the commands necessary to start the workers with
-    # the "show" command:
+    # the 'show' command:
     $ celeryd-multi show 10 -l INFO -Q:1-3 images,video -Q:4,5 data
         -Q default -L:4,5 DEBUG
 
@@ -109,7 +109,7 @@ from celery.utils import term
 from celery.utils.text import pluralize
 
 SIGNAMES = set(sig for sig in dir(signal)
-                        if sig.startswith("SIG") and "_" not in sig)
+                        if sig.startswith('SIG') and '_' not in sig)
 SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES)
 
 USAGE = """\
@@ -148,50 +148,50 @@ class MultiTool(object):
         self.quiet = quiet
         self.verbose = verbose
         self.no_color = no_color
-        self.prog_name = "celeryd-multi"
-        self.commands = {"start": self.start,
-                         "show": self.show,
-                         "stop": self.stop,
-                         "stop_verify": self.stop_verify,
-                         "restart": self.restart,
-                         "kill": self.kill,
-                         "names": self.names,
-                         "expand": self.expand,
-                         "get": self.get,
-                         "help": self.help}
-
-    def execute_from_commandline(self, argv, cmd="celeryd"):
+        self.prog_name = 'celeryd-multi'
+        self.commands = {'start': self.start,
+                         'show': self.show,
+                         'stop': self.stop,
+                         'stop_verify': self.stop_verify,
+                         'restart': self.restart,
+                         'kill': self.kill,
+                         'names': self.names,
+                         'expand': self.expand,
+                         'get': self.get,
+                         'help': self.help}
+
+    def execute_from_commandline(self, argv, cmd='celeryd'):
         argv = list(argv)   # don't modify the caller's argv.
 
         # Reserve the --nosplash|--quiet|-q/--verbose options.
-        if "--nosplash" in argv:
-            self.nosplash = argv.pop(argv.index("--nosplash"))
-        if "--quiet" in argv:
-            self.quiet = argv.pop(argv.index("--quiet"))
-        if "-q" in argv:
-            self.quiet = argv.pop(argv.index("-q"))
-        if "--verbose" in argv:
-            self.verbose = argv.pop(argv.index("--verbose"))
-        if "--no-color" in argv:
-            self.no_color = argv.pop(argv.index("--no-color"))
+        if '--nosplash' in argv:
+            self.nosplash = argv.pop(argv.index('--nosplash'))
+        if '--quiet' in argv:
+            self.quiet = argv.pop(argv.index('--quiet'))
+        if '-q' in argv:
+            self.quiet = argv.pop(argv.index('-q'))
+        if '--verbose' in argv:
+            self.verbose = argv.pop(argv.index('--verbose'))
+        if '--no-color' in argv:
+            self.no_color = argv.pop(argv.index('--no-color'))
 
         self.prog_name = os.path.basename(argv.pop(0))
-        if not argv or argv[0][0] == "-":
+        if not argv or argv[0][0] == '-':
             return self.error()
 
         try:
             self.commands[argv[0]](argv[1:], cmd)
         except KeyError:
-            self.error("Invalid command: %s" % argv[0])
+            self.error('Invalid command: %s' % argv[0])
 
         return self.retcode
 
     def say(self, m, newline=True):
-        self.fh.write("%s\n" % m if m else m)
+        self.fh.write('%s\n' % m if m else m)
 
     def names(self, argv, cmd):
         p = NamespacedOptionParser(argv)
-        self.say("\n".join(hostname
+        self.say('\n'.join(hostname
                         for hostname, _, _ in multi_args(p, cmd)))
 
     def get(self, argv, cmd):
@@ -199,13 +199,13 @@ class MultiTool(object):
         p = NamespacedOptionParser(argv[1:])
         for name, worker, _ in multi_args(p, cmd):
             if name == wanted:
-                self.say(" ".join(worker))
+                self.say(' '.join(worker))
                 return
 
     def show(self, argv, cmd):
         p = NamespacedOptionParser(argv)
-        self.note("> Starting nodes...")
-        self.say("\n".join(" ".join(worker)
+        self.note('> Starting nodes...')
+        self.say('\n'.join(' '.join(worker)
                         for _, worker, _ in multi_args(p, cmd)))
 
     def start(self, argv, cmd):
@@ -213,18 +213,18 @@ class MultiTool(object):
         p = NamespacedOptionParser(argv)
         self.with_detacher_default_options(p)
         retcodes = []
-        self.note("> Starting nodes...")
+        self.note('> Starting nodes...')
         for nodename, argv, _ in multi_args(p, cmd):
-            self.note("\t> %s: " % (nodename, ), newline=False)
+            self.note('\t> %s: ' % (nodename, ), newline=False)
             retcode = self.waitexec(argv)
             self.note(retcode and self.FAILED or self.OK)
             retcodes.append(retcode)
         self.retcode = int(any(retcodes))
 
     def with_detacher_default_options(self, p):
-        p.options.setdefault("--pidfile", "celeryd@%n.pid")
-        p.options.setdefault("--logfile", "celeryd@%n.log")
-        p.options.setdefault("--cmd", "-m celery.bin.celeryd_detach")
+        p.options.setdefault('--pidfile', 'celeryd@%n.pid')
+        p.options.setdefault('--logfile', 'celeryd@%n.log')
+        p.options.setdefault('--cmd', '-m celery.bin.celeryd_detach')
 
     def signal_node(self, nodename, pid, sig):
         try:
@@ -232,7 +232,7 @@ class MultiTool(object):
         except OSError, exc:
             if exc.errno != errno.ESRCH:
                 raise
-            self.note("Could not signal %s (%s): No such process" % (
+            self.note('Could not signal %s (%s): No such process' % (
                         nodename, pid))
             return False
         return True
@@ -257,11 +257,11 @@ class MultiTool(object):
             if callback:
                 callback(*node)
 
-        self.note(self.colored.blue("> Stopping nodes..."))
+        self.note(self.colored.blue('> Stopping nodes...'))
         for node in list(P):
             if node in P:
                 nodename, _, pid = node
-                self.note("\t> %s: %s -> %s" % (nodename,
+                self.note('\t> %s: %s -> %s' % (nodename,
                                                 SIGMAP[sig][3:],
                                                 pid))
                 if not self.signal_node(nodename, pid, sig):
@@ -270,8 +270,8 @@ class MultiTool(object):
         def note_waiting():
             left = len(P)
             if left:
-                self.note(self.colored.blue("> Waiting for %s %s..." % (
-                    left, pluralize(left, "node"))), newline=False)
+                self.note(self.colored.blue('> Waiting for %s %s...' % (
+                    left, pluralize(left, 'node'))), newline=False)
 
         if retry:
             note_waiting()
@@ -279,19 +279,19 @@ class MultiTool(object):
             while P:
                 for node in P:
                     its += 1
-                    self.note(".", newline=False)
+                    self.note('.', newline=False)
                     nodename, _, pid = node
                     if not self.node_alive(pid):
-                        self.note("\n\t> %s: %s" % (nodename, self.OK))
+                        self.note('\n\t> %s: %s' % (nodename, self.OK))
                         on_down(node)
                         note_waiting()
                         break
                 if P and not its % len(P):
                     sleep(float(retry))
-            self.note("")
+            self.note('')
 
     def getpids(self, p, cmd, callback=None):
-        pidfile_template = p.options.setdefault("--pidfile", "celeryd@%n.pid")
+        pidfile_template = p.options.setdefault('--pidfile', 'celeryd@%n.pid')
 
         nodes = []
         for nodename, argv, expander in multi_args(p, cmd):
@@ -304,7 +304,7 @@ class MultiTool(object):
             if pid:
                 nodes.append((nodename, tuple(argv), pid))
             else:
-                self.note("> %s: %s" % (nodename, self.DOWN))
+                self.note('> %s: %s' % (nodename, self.DOWN))
                 if callback:
                     callback(nodename, argv, pid)
 
@@ -314,7 +314,7 @@ class MultiTool(object):
         self.splash()
         p = NamespacedOptionParser(argv)
         for nodename, _, pid in self.getpids(p, cmd):
-            self.note("Killing node %s (%s)" % (nodename, pid))
+            self.note('Killing node %s (%s)' % (nodename, pid))
             self.signal_node(nodename, pid, signal.SIGKILL)
 
     def stop(self, argv, cmd, retry=None, callback=None):
@@ -337,7 +337,7 @@ class MultiTool(object):
 
         def on_node_shutdown(nodename, argv, pid):
             self.note(self.colored.blue(
-                "> Restarting node %s: " % nodename), newline=False)
+                '> Restarting node %s: ' % nodename), newline=False)
             retval = self.waitexec(argv)
             self.note(retval and self.FAILED or self.OK)
             retvals.append(retval)
@@ -362,24 +362,24 @@ class MultiTool(object):
 
     def usage(self):
         self.splash()
-        self.say(USAGE % {"prog_name": self.prog_name})
+        self.say(USAGE % {'prog_name': self.prog_name})
 
     def splash(self):
         if not self.nosplash:
             c = self.colored
-            self.note(c.cyan("celeryd-multi v%s" % __version__))
+            self.note(c.cyan('celeryd-multi v%s' % __version__))
 
     def waitexec(self, argv, path=sys.executable):
-        args = " ".join([path] + list(argv))
+        args = ' '.join([path] + list(argv))
         argstr = shellsplit(from_utf8(args))
         pipe = Popen(argstr, env=self.env)
-        self.info("  %s" % " ".join(argstr))
+        self.info('  %s' % ' '.join(argstr))
         retcode = pipe.wait()
         if retcode < 0:
-            self.note("* Child was terminated by signal %s" % (-retcode, ))
+            self.note('* Child was terminated by signal %s' % (-retcode, ))
             return -retcode
         elif retcode > 0:
-            self.note("* Child terminated with failure code %s" % (retcode, ))
+            self.note('* Child terminated with failure code %s' % (retcode, ))
         return retcode
 
     def error(self, msg=None):
@@ -403,18 +403,18 @@ class MultiTool(object):
 
     @cached_property
     def OK(self):
-        return str(self.colored.green("OK"))
+        return str(self.colored.green('OK'))
 
     @cached_property
     def FAILED(self):
-        return str(self.colored.red("FAILED"))
+        return str(self.colored.red('FAILED'))
 
     @cached_property
     def DOWN(self):
-        return str(self.colored.magenta("DOWN"))
+        return str(self.colored.magenta('DOWN'))
 
 
-def multi_args(p, cmd="celeryd", append="", prefix="", suffix=""):
+def multi_args(p, cmd='celeryd', append='', prefix='', suffix=''):
     names = p.values
     options = dict(p.options)
     passthrough = p.passthrough
@@ -426,26 +426,26 @@ def multi_args(p, cmd="celeryd", append="", prefix="", suffix=""):
             pass
         else:
             names = map(str, range(1, noderange + 1))
-            prefix = "celery"
-    cmd = options.pop("--cmd", cmd)
-    append = options.pop("--append", append)
-    hostname = options.pop("--hostname",
-                   options.pop("-n", socket.gethostname()))
-    prefix = options.pop("--prefix", prefix) or ""
-    suffix = options.pop("--suffix", suffix) or "." + hostname
+            prefix = 'celery'
+    cmd = options.pop('--cmd', cmd)
+    append = options.pop('--append', append)
+    hostname = options.pop('--hostname',
+                   options.pop('-n', socket.gethostname()))
+    prefix = options.pop('--prefix', prefix) or ''
+    suffix = options.pop('--suffix', suffix) or '.' + hostname
     if suffix in ('""', "''"):
-        suffix = ""
+        suffix = ''
 
     for ns_name, ns_opts in p.namespaces.items():
-        if "," in ns_name or (ranges and "-" in ns_name):
+        if ',' in ns_name or (ranges and '-' in ns_name):
             for subns in parse_ns_range(ns_name, ranges):
                 p.namespaces[subns].update(ns_opts)
             p.namespaces.pop(ns_name)
 
     for name in names:
-        this_name = options["-n"] = prefix + name + suffix
-        expand = abbreviations({"%h": this_name,
-                                "%n": name})
+        this_name = options['-n'] = prefix + name + suffix
+        expand = abbreviations({'%h': this_name,
+                                '%n': name})
         argv = ([expand(cmd)] +
                 [format_opt(opt, expand(value))
                         for opt, value in p.optmerge(name, options).items()] +
@@ -461,7 +461,7 @@ class NamespacedOptionParser(object):
         self.args = args
         self.options = {}
         self.values = []
-        self.passthrough = ""
+        self.passthrough = ''
         self.namespaces = defaultdict(lambda: {})
 
         self.parse()
@@ -471,11 +471,11 @@ class NamespacedOptionParser(object):
         pos = 0
         while pos < len(rargs):
             arg = rargs[pos]
-            if arg == "--":
-                self.passthrough = " ".join(rargs[pos:])
+            if arg == '--':
+                self.passthrough = ' '.join(rargs[pos:])
                 break
-            elif arg[0] == "-":
-                if arg[1] == "-":
+            elif arg[0] == '-':
+                if arg[1] == '-':
                     self.process_long_opt(arg[2:])
                 else:
                     value = None
@@ -488,8 +488,8 @@ class NamespacedOptionParser(object):
             pos += 1
 
     def process_long_opt(self, arg, value=None):
-        if "=" in arg:
-            arg, value = arg.split("=", 1)
+        if '=' in arg:
+            arg, value = arg.split('=', 1)
         self.add_option(arg, value, short=False)
 
     def process_short_opt(self, arg, value=None):
@@ -501,10 +501,10 @@ class NamespacedOptionParser(object):
         return dict(defaults, **self.namespaces[ns])
 
     def add_option(self, name, value, short=False, ns=None):
-        prefix = short and "-" or "--"
+        prefix = short and '-' or '--'
         dest = self.options
-        if ":" in name:
-            name, ns = name.split(":")
+        if ':' in name:
+            name, ns = name.split(':')
             dest = self.namespaces[ns]
         dest[prefix + name] = value
 
@@ -516,16 +516,16 @@ def quote(v):
 def format_opt(opt, value):
     if not value:
         return opt
-    if opt.startswith("--"):
-        return "%s=%s" % (opt, value)
-    return "%s %s" % (opt, value)
+    if opt.startswith('--'):
+        return '%s=%s' % (opt, value)
+    return '%s %s' % (opt, value)
 
 
 def parse_ns_range(ns, ranges=False):
     ret = []
-    for space in "," in ns and ns.split(",") or [ns]:
-        if ranges and "-" in space:
-            start, stop = space.split("-")
+    for space in ',' in ns and ns.split(',') or [ns]:
+        if ranges and '-' in space:
+            start, stop = space.split('-')
             x = map(str, range(int(start), int(stop) + 1))
             ret.extend(x)
         else:
@@ -547,16 +547,16 @@ def abbreviations(map):
 
 def findsig(args, default=signal.SIGTERM):
     for arg in reversed(args):
-        if len(arg) == 2 and arg[0] == "-":
+        if len(arg) == 2 and arg[0] == '-':
             try:
                 return int(arg[1])
             except ValueError:
                 pass
-        if arg[0] == "-":
-            maybe_sig = "SIG" + arg[1:]
+        if arg[0] == '-':
+            maybe_sig = 'SIG' + arg[1:]
             if maybe_sig in SIGNAMES:
                 return getattr(signal, maybe_sig)
     return default
 
-if __name__ == "__main__":              # pragma: no cover
+if __name__ == '__main__':              # pragma: no cover
     main()
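
Reviewer note: the namespaced option syntax handled by NamespacedOptionParser above lets a flag target a single node, so '-c:2 8' overrides a global '-c 4' for node 2 only. A sketch against this module (node names get the 'celery' prefix and local hostname suffix; the order of option flags in argv may vary):

    from celery.bin.celeryd_multi import NamespacedOptionParser, multi_args

    p = NamespacedOptionParser(['3', '-c', '4', '-c:2', '8'])
    for nodename, argv, _ in multi_args(p, cmd='celeryd'):
        print('%s -> %s' % (nodename, ' '.join(argv)))
    # celery1.<host> -> celeryd -n celery1.<host> -c 4
    # celery2.<host> -> celeryd -n celery2.<host> -c 8   (namespaced override)
    # celery3.<host> -> celeryd -n celery3.<host> -c 4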

+ 15 - 15
celery/bin/celeryev.py

@@ -52,7 +52,7 @@ class EvCommand(Command):
     supports_args = False
 
     def run(self, dump=False, camera=None, frequency=1.0, maxrate=None,
-            loglevel="INFO", logfile=None, prog_name="celeryev",
+            loglevel='INFO', logfile=None, prog_name='celeryev',
             pidfile=None, uid=None, gid=None, umask=None,
             working_directory=None, detach=False, **kwargs):
         self.prog_name = prog_name
@@ -69,18 +69,18 @@ class EvCommand(Command):
         return self.run_evtop()
 
     def prepare_preload_options(self, options):
-        workdir = options.get("working_directory")
+        workdir = options.get('working_directory')
         if workdir:
             os.chdir(workdir)
 
     def run_evdump(self):
         from celery.events.dumper import evdump
-        self.set_process_status("dump")
+        self.set_process_status('dump')
         return evdump(app=self.app)
 
     def run_evtop(self):
         from celery.events.cursesmon import evtop
-        self.set_process_status("top")
+        self.set_process_status('top')
         return evtop(app=self.app)
 
     def run_evcam(self, camera, logfile=None, pidfile=None, uid=None,
@@ -88,8 +88,8 @@ class EvCommand(Command):
             detach=False, **kwargs):
         from celery.events.snapshot import evcam
         workdir = working_directory
-        self.set_process_status("cam")
-        kwargs["app"] = self.app
+        self.set_process_status('cam')
+        kwargs['app'] = self.app
         cam = partial(evcam, camera,
                       logfile=logfile, pidfile=pidfile, **kwargs)
 
@@ -99,25 +99,25 @@ class EvCommand(Command):
         else:
             return cam()
 
-    def set_process_status(self, prog, info=""):
-        prog = "%s:%s" % (self.prog_name, prog)
-        info = "%s %s" % (info, strargv(sys.argv))
+    def set_process_status(self, prog, info=''):
+        prog = '%s:%s' % (self.prog_name, prog)
+        info = '%s %s' % (info, strargv(sys.argv))
         return set_process_title(prog, info=info)
 
     def get_options(self):
         return (
-            Option('-d', '--dump', action="store_true"),
+            Option('-d', '--dump', action='store_true'),
             Option('-c', '--camera'),
-            Option('--detach', action="store_true"),
-            Option('-F', '--frequency', '--freq', type="float", default=1.0),
+            Option('--detach', action='store_true'),
+            Option('-F', '--frequency', '--freq', type='float', default=1.0),
             Option('-r', '--maxrate'),
-            Option('-l', '--loglevel', default="INFO"),
-        ) + daemon_options(default_pidfile="celeryev.pid")
+            Option('-l', '--loglevel', default='INFO'),
+        ) + daemon_options(default_pidfile='celeryev.pid')
 
 
 def main():
     ev = EvCommand()
     ev.execute_from_commandline()
 
-if __name__ == "__main__":              # pragma: no cover
+if __name__ == '__main__':              # pragma: no cover
     main()
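
Reviewer note: the three code paths above correspond to 'celeryev --dump', 'celeryev --camera=...' and the default curses top view. The dump mode can also be driven directly; a sketch (the broker URL is an assumption):

    from celery import Celery
    from celery.events.dumper import evdump

    app = Celery(broker='amqp://guest@localhost//')  # assumed broker URL
    evdump(app=app)  # blocks, printing each event as it arrives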

+ 52 - 52
celery/canvas.py

@@ -23,7 +23,7 @@ from celery.utils.functional import (
 )
 from celery.utils.text import truncate
 
-Chord = Proxy(lambda: current_app.tasks["celery.chord"])
+Chord = Proxy(lambda: current_app.tasks['celery.chord'])
 
 
 class _getitem_property(object):
@@ -56,9 +56,9 @@ class Signature(dict):
     arguments will be ignored and the values in the dict will be used
     instead.
 
-        >>> s = subtask("tasks.add", args=(2, 2))
+        >>> s = subtask('tasks.add', args=(2, 2))
         >>> subtask(s)
-        {"task": "tasks.add", args=(2, 2), kwargs={}, options={}}
+        {'task': 'tasks.add', args=(2, 2), kwargs={}, options={}}
 
     """
     TYPES = {}
@@ -71,7 +71,7 @@ class Signature(dict):
 
     @classmethod
     def from_dict(self, d):
-        typ = d.get("subtask_type")
+        typ = d.get('subtask_type')
         if typ:
             return self.TYPES[typ].from_dict(d)
         return Signature(d)
@@ -116,10 +116,10 @@ class Signature(dict):
 
     def clone(self, args=(), kwargs={}, **options):
         args, kwargs, options = self._merge(args, kwargs, options)
-        s = Signature.from_dict({"task": self.task, "args": args,
-                                 "kwargs": kwargs, "options": options,
-                                 "subtask_type": self.subtask_type,
-                                 "immutable": self.immutable})
+        s = Signature.from_dict({'task': self.task, 'args': args,
+                                 'kwargs': kwargs, 'options': options,
+                                 'subtask_type': self.subtask_type,
+                                 'immutable': self.immutable})
         s._type = self._type
         return s
     partial = clone
@@ -152,15 +152,15 @@ class Signature(dict):
         return value
 
     def link(self, callback):
-        return self.append_to_list_option("link", callback)
+        return self.append_to_list_option('link', callback)
 
     def link_error(self, errback):
-        return self.append_to_list_option("link_error", errback)
+        return self.append_to_list_option('link_error', errback)
 
     def flatten_links(self):
         return list(chain_from_iterable(_chain([[self]],
                 (link.flatten_links()
-                    for link in maybe_list(self.options.get("link")) or []))))
+                    for link in maybe_list(self.options.get('link')) or []))))
 
     def __or__(self, other):
         if isinstance(other, chain):
@@ -181,95 +181,95 @@ class Signature(dict):
 
     def reprcall(self, *args, **kwargs):
         args, kwargs, _ = self._merge(args, kwargs, {})
-        return reprcall(self["task"], args, kwargs)
+        return reprcall(self['task'], args, kwargs)
 
     def __repr__(self):
         return self.reprcall()
 
     @cached_property
     def type(self):
-        return self._type or current_app.tasks[self["task"]]
-    task = _getitem_property("task")
-    args = _getitem_property("args")
-    kwargs = _getitem_property("kwargs")
-    options = _getitem_property("options")
-    subtask_type = _getitem_property("subtask_type")
-    immutable = _getitem_property("immutable")
+        return self._type or current_app.tasks[self['task']]
+    task = _getitem_property('task')
+    args = _getitem_property('args')
+    kwargs = _getitem_property('kwargs')
+    options = _getitem_property('options')
+    subtask_type = _getitem_property('subtask_type')
+    immutable = _getitem_property('immutable')
 
 
 class chain(Signature):
 
     def __init__(self, *tasks, **options):
         tasks = tasks[0] if len(tasks) == 1 and is_list(tasks[0]) else tasks
-        Signature.__init__(self, "celery.chain", (), {"tasks": tasks}, options)
+        Signature.__init__(self, 'celery.chain', (), {'tasks': tasks}, options)
         self.tasks = tasks
-        self.subtask_type = "chain"
+        self.subtask_type = 'chain'
 
     def __call__(self, *args, **kwargs):
         return self.apply_async(*args, **kwargs)
 
     @classmethod
     def from_dict(self, d):
-        return chain(*d["kwargs"]["tasks"], **kwdict(d["options"]))
+        return chain(*d['kwargs']['tasks'], **kwdict(d['options']))
 
     def __repr__(self):
-        return " | ".join(map(repr, self.tasks))
+        return ' | '.join(map(repr, self.tasks))
 Signature.register_type(chain)
 
 
 class _basemap(Signature):
     _task_name = None
-    _unpack_args = itemgetter("task", "it")
+    _unpack_args = itemgetter('task', 'it')
 
     def __init__(self, task, it, **options):
         Signature.__init__(self, self._task_name, (),
-                {"task": task, "it": regen(it)}, **options)
+                {'task': task, 'it': regen(it)}, **options)
 
     def apply_async(self, args=(), kwargs={}, **opts):
         # need to evaluate generators
         task, it = self._unpack_args(self.kwargs)
         return self.type.apply_async((),
-                {"task": task, "it": list(it)}, **opts)
+                {'task': task, 'it': list(it)}, **opts)
 
     @classmethod
     def from_dict(self, d):
-        return chunks(*self._unpack_args(d["kwargs"]), **d["options"])
+        return chunks(*self._unpack_args(d['kwargs']), **d['options'])
 
 
 class xmap(_basemap):
-    _task_name = "celery.map"
+    _task_name = 'celery.map'
 
     def __repr__(self):
         task, it = self._unpack_args(self.kwargs)
-        return "[%s(x) for x in %s]" % (task.task, truncate(repr(it), 100))
+        return '[%s(x) for x in %s]' % (task.task, truncate(repr(it), 100))
 Signature.register_type(xmap)
 
 
 class xstarmap(_basemap):
-    _task_name = "celery.starmap"
+    _task_name = 'celery.starmap'
 
     def __repr__(self):
         task, it = self._unpack_args(self.kwargs)
-        return "[%s(*x) for x in %s]" % (task.task, truncate(repr(it), 100))
+        return '[%s(*x) for x in %s]' % (task.task, truncate(repr(it), 100))
 Signature.register_type(xstarmap)
 
 
 class chunks(Signature):
-    _unpack_args = itemgetter("task", "it", "n")
+    _unpack_args = itemgetter('task', 'it', 'n')
 
     def __init__(self, task, it, n, **options):
-        Signature.__init__(self, "celery.chunks", (),
-                {"task": task, "it": regen(it), "n": n}, **options)
+        Signature.__init__(self, 'celery.chunks', (),
+                {'task': task, 'it': regen(it), 'n': n}, **options)
 
     @classmethod
     def from_dict(self, d):
-        return chunks(*self._unpack_args(d["kwargs"]), **d["options"])
+        return chunks(*self._unpack_args(d['kwargs']), **d['options'])
 
     def apply_async(self, args=(), kwargs={}, **opts):
         # need to evaluate generators
         task, it, n = self._unpack_args(self.kwargs)
         return self.type.apply_async((),
-                {"task": task, "it": list(it), "n": n}, **opts)
+                {'task': task, 'it': list(it), 'n': n}, **opts)
 
     def __call__(self, **options):
         return self.group()(**options)
@@ -289,12 +289,12 @@ class group(Signature):
     def __init__(self, *tasks, **options):
         tasks = regen(tasks[0] if len(tasks) == 1 and is_list(tasks[0])
                                else tasks)
-        Signature.__init__(self, "celery.group", (), {"tasks": tasks}, options)
-        self.tasks, self.subtask_type = tasks, "group"
+        Signature.__init__(self, 'celery.group', (), {'tasks': tasks}, options)
+        self.tasks, self.subtask_type = tasks, 'group'
 
     @classmethod
     def from_dict(self, d):
-        return group(d["kwargs"]["tasks"], **kwdict(d["options"]))
+        return group(d['kwargs']['tasks'], **kwdict(d['options']))
 
     def __call__(self, **options):
         tasks, result, gid = self.type.prepare(options,
@@ -316,23 +316,23 @@ class chord(Signature):
     Chord = Chord
 
     def __init__(self, header, body=None, **options):
-        Signature.__init__(self, "celery.chord", (),
-                         {"header": regen(header),
-                          "body": maybe_subtask(body)}, options)
-        self.subtask_type = "chord"
+        Signature.__init__(self, 'celery.chord', (),
+                         {'header': regen(header),
+                          'body': maybe_subtask(body)}, options)
+        self.subtask_type = 'chord'
 
     @classmethod
     def from_dict(self, d):
-        kwargs = d["kwargs"]
-        return chord(kwargs["header"], kwargs.get("body"),
-                     **kwdict(d["options"]))
+        kwargs = d['kwargs']
+        return chord(kwargs['header'], kwargs.get('body'),
+                     **kwdict(d['options']))
 
     def __call__(self, body=None, **options):
         _chord = self.Chord
-        self.kwargs["body"] = body or self.kwargs["body"]
+        self.kwargs['body'] = body or self.kwargs['body']
         if _chord.app.conf.CELERY_ALWAYS_EAGER:
             return self.apply((), {}, **options)
-        callback_id = body.options.setdefault("task_id", uuid())
+        callback_id = body.options.setdefault('task_id', uuid())
         _chord(**self.kwargs)
         return _chord.AsyncResult(callback_id)
 
@@ -340,7 +340,7 @@ class chord(Signature):
         s = Signature.clone(self, *args, **kwargs)
         # need to make copy of body
         try:
-            s.kwargs["body"] = s.kwargs["body"].clone()
+            s.kwargs['body'] = s.kwargs['body'].clone()
         except (AttributeError, KeyError):
             pass
         return s
@@ -356,15 +356,15 @@ class chord(Signature):
     def __repr__(self):
         if self.body:
             return self.body.reprcall(self.tasks)
-        return "<chord without body: %r>" % (self.tasks, )
+        return '<chord without body: %r>' % (self.tasks, )
 
     @property
     def tasks(self):
-        return self.kwargs["header"]
+        return self.kwargs['header']
 
     @property
     def body(self):
-        return self.kwargs.get("body")
+        return self.kwargs.get('body')
 Signature.register_type(chord)
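
Reviewer note: since this hunk touches most of the canvas primitives at once, a compact usage sketch may help (assumes 'tasks.add' and 'tasks.mul' are registered tasks):

    from celery import chord, subtask

    s = subtask('tasks.add', args=(2, 2))
    s.link(subtask('tasks.mul', args=(10, )))   # stored in options['link']

    # '|' composes two signatures into a chain:
    c = s | subtask('tasks.mul', args=(4, ))

    # a chord runs the header group, then feeds the joined result to body:
    ch = chord([subtask('tasks.add', args=(i, i)) for i in range(10)],
               subtask('tasks.mul', args=(2, )))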
 
 

+ 5 - 5
celery/concurrency/__init__.py

@@ -11,11 +11,11 @@ from __future__ import absolute_import
 from celery.utils.imports import symbol_by_name
 
 ALIASES = {
-    "processes": "celery.concurrency.processes:TaskPool",
-    "eventlet": "celery.concurrency.eventlet:TaskPool",
-    "gevent": "celery.concurrency.gevent:TaskPool",
-    "threads": "celery.concurrency.threads:TaskPool",
-    "solo": "celery.concurrency.solo:TaskPool",
+    'processes': 'celery.concurrency.processes:TaskPool',
+    'eventlet': 'celery.concurrency.eventlet:TaskPool',
+    'gevent': 'celery.concurrency.gevent:TaskPool',
+    'threads': 'celery.concurrency.threads:TaskPool',
+    'solo': 'celery.concurrency.solo:TaskPool',
 }
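
Reviewer note: these aliases are what the worker's --pool option resolves through; symbol_by_name (imported above) accepts the alias table directly:

    from celery.utils.imports import symbol_by_name
    from celery.concurrency import ALIASES

    TaskPool = symbol_by_name('processes', ALIASES)  # -> processes.TaskPool
    # full 'module:attr' paths bypass the alias table:
    Solo = symbol_by_name('celery.concurrency.solo:TaskPool', ALIASES)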
 
 

+ 4 - 4
celery/concurrency/base.py

@@ -17,7 +17,7 @@ from kombu.utils.encoding import safe_repr
 from celery.utils import timer2
 from celery.utils.log import get_logger
 
-logger = get_logger("celery.concurrency")
+logger = get_logger('celery.concurrency')
 
 
 def apply_target(target, args=(), kwargs={}, callback=None,
@@ -88,11 +88,11 @@ class BasePool(object):
 
     def terminate_job(self, pid):
         raise NotImplementedError(
-                "%s does not implement kill_job" % (self.__class__, ))
+                '%s does not implement kill_job' % (self.__class__, ))
 
     def restart(self):
         raise NotImplementedError(
-                "%s does not implement restart" % (self.__class__, ))
+                '%s does not implement restart' % (self.__class__, ))
 
     def stop(self):
         self.on_stop()
@@ -124,7 +124,7 @@ class BasePool(object):
 
         """
         if self._does_debug:
-            logger.debug("TaskPool: Apply %s (args:%s kwargs:%s)",
+            logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
                          target, safe_repr(args), safe_repr(kwargs))
 
         return self.on_apply(target, args, kwargs,
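
Reviewer note: apply_target (top of this hunk) is the common execution path for every pool: it fires the accept callback, runs the target, then hands the return value to callback. A hedged sketch (the accept_callback parameter is cut off by the hunk but belongs to the same signature):

    from celery.concurrency.base import apply_target

    def accepted(pid, when):           # accept_callback: pid + accept time
        print('accepted by pid %s' % (pid, ))

    def done(result):                  # callback receives the return value
        print('result: %r' % (result, ))

    apply_target(lambda x, y: x + y, args=(2, 2),
                 callback=done, accept_callback=accepted)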

+ 1 - 1
celery/concurrency/eventlet.py

@@ -9,7 +9,7 @@
 from __future__ import absolute_import
 
 import os
-if not os.environ.get("EVENTLET_NOPATCH"):
+if not os.environ.get('EVENTLET_NOPATCH'):
     import eventlet
     import eventlet.debug
     eventlet.monkey_patch()

+ 1 - 1
celery/concurrency/gevent.py

@@ -9,7 +9,7 @@
 from __future__ import absolute_import
 
 import os
-if not os.environ.get("GEVENT_NOPATCH"):
+if not os.environ.get('GEVENT_NOPATCH'):
     from gevent import monkey
     monkey.patch_all()
 

+ 17 - 17
celery/concurrency/processes/__init__.py

@@ -22,7 +22,7 @@ from celery.concurrency.base import BasePool
 from celery.task import trace
 from billiard.pool import Pool, RUN, CLOSE
 
-if platform.system() == "Windows":  # pragma: no cover
+if platform.system() == 'Windows':  # pragma: no cover
     # On Windows os.kill calls TerminateProcess which cannot be
     # handled by any process, so this is needed to terminate the task
     # *and its children* (if any).
@@ -31,14 +31,14 @@ else:
     from os import kill as _kill                 # noqa
 
 #: List of signals to reset when a child process starts.
-WORKER_SIGRESET = frozenset(["SIGTERM",
-                             "SIGHUP",
-                             "SIGTTIN",
-                             "SIGTTOU",
-                             "SIGUSR1"])
+WORKER_SIGRESET = frozenset(['SIGTERM',
+                             'SIGHUP',
+                             'SIGTTIN',
+                             'SIGTTOU',
+                             'SIGUSR1'])
 
 #: List of signals to ignore when a child process starts.
-WORKER_SIGIGNORE = frozenset(["SIGINT"])
+WORKER_SIGIGNORE = frozenset(['SIGINT'])
 
 
 def process_initializer(app, hostname):
@@ -48,14 +48,14 @@ def process_initializer(app, hostname):
     trace._tasks = app._tasks  # make sure this optimization is set.
     platforms.signals.reset(*WORKER_SIGRESET)
     platforms.signals.ignore(*WORKER_SIGIGNORE)
-    platforms.set_mp_process_title("celery", hostname=hostname)
+    platforms.set_mp_process_title('celery', hostname=hostname)
     # This is for Windows and other platforms not supporting
     # fork(). Note that init_worker makes sure it's only
     # run once per process.
-    app.log.setup(int(os.environ.get("CELERY_LOG_LEVEL", 0)),
-                  os.environ.get("CELERY_LOG_FILE") or None,
-                  bool(os.environ.get("CELERY_LOG_REDIRECT", False)),
-                  str(os.environ.get("CELERY_LOG_REDIRECT_LEVEL")))
+    app.log.setup(int(os.environ.get('CELERY_LOG_LEVEL', 0)),
+                  os.environ.get('CELERY_LOG_FILE') or None,
+                  bool(os.environ.get('CELERY_LOG_REDIRECT', False)),
+                  str(os.environ.get('CELERY_LOG_REDIRECT_LEVEL')))
     app.loader.init_worker()
     app.loader.init_worker_process()
     app.finalize()
@@ -117,11 +117,11 @@ class TaskPool(BasePool):
         self._pool.restart()
 
     def _get_info(self):
-        return {"max-concurrency": self.limit,
-                "processes": [p.pid for p in self._pool._pool],
-                "max-tasks-per-child": self._pool._maxtasksperchild,
-                "put-guarded-by-semaphore": self.putlocks,
-                "timeouts": (self._pool.soft_timeout, self._pool.timeout)}
+        return {'max-concurrency': self.limit,
+                'processes': [p.pid for p in self._pool._pool],
+                'max-tasks-per-child': self._pool._maxtasksperchild,
+                'put-guarded-by-semaphore': self.putlocks,
+                'timeouts': (self._pool.soft_timeout, self._pool.timeout)}
 
     def init_callbacks(self, **kwargs):
         for k, v in kwargs.iteritems():
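
Reviewer note: process_initializer reads the child logging setup back out of the environment, so the parent process (or a test harness) configures children by exporting these variables; the values below are illustrative:

    import os

    # Read back by process_initializer() above in each child process.
    os.environ['CELERY_LOG_LEVEL'] = '10'          # numeric level (DEBUG)
    os.environ['CELERY_LOG_FILE'] = '/var/log/celery/worker.log'
    os.environ['CELERY_LOG_REDIRECT'] = '1'
    os.environ['CELERY_LOG_REDIRECT_LEVEL'] = 'WARNING'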

+ 5 - 5
celery/concurrency/solo.py

@@ -21,8 +21,8 @@ class TaskPool(BasePool):
         self.on_apply = apply_target
 
     def _get_info(self):
-        return {"max-concurrency": 1,
-                "processes": [os.getpid()],
-                "max-tasks-per-child": None,
-                "put-guarded-by-semaphore": True,
-                "timeouts": ()}
+        return {'max-concurrency': 1,
+                'processes': [os.getpid()],
+                'max-tasks-per-child': None,
+                'put-guarded-by-semaphore': True,
+                'timeouts': ()}

+ 1 - 1
celery/concurrency/threads.py

@@ -26,7 +26,7 @@ class TaskPool(BasePool):
             import threadpool
         except ImportError:
             raise ImportError(
-                    "The threaded pool requires the threadpool module.")
+                    'The threaded pool requires the threadpool module.')
         self.WorkRequest = threadpool.WorkRequest
         self.ThreadPool = threadpool.ThreadPool
         super(TaskPool, self).__init__(*args, **kwargs)

+ 4 - 4
celery/contrib/abortable.py

@@ -41,11 +41,11 @@ In the consumer:
                    if self.is_aborted(**kwargs):
                        # Respect the aborted status and terminate
                        # gracefully
-                       logger.warning("Task aborted.")
+                       logger.warning('Task aborted.')
                        return
                y = do_something_expensive(x)
                results.append(y)
-           logger.info("Task finished.")
+           logger.info('Task finished.')
            return results
 
 
@@ -98,14 +98,14 @@ Task is aborted (typically by the producer) and should be
 aborted as soon as possible.
 
 """
-ABORTED = "ABORTED"
+ABORTED = 'ABORTED'
 
 
 class AbortableAsyncResult(AsyncResult):
     """Represents a abortable result.
 
     Specifically, this gives the `AsyncResult` a :meth:`abort()` method,
-    which sets the state of the underlying Task to `"ABORTED"`.
+    which sets the state of the underlying Task to `'ABORTED'`.
 
     """
 

+ 8 - 8
celery/contrib/batches.py

@@ -19,13 +19,13 @@ A click counter that flushes the buffer every 100 messages, and every
     @task(base=Batches, flush_every=100, flush_interval=10)
     def count_click(requests):
         from collections import Counter
-        count = Counter(request.kwargs["url"] for request in requests)
+        count = Counter(request.kwargs['url'] for request in requests)
         for url, count in count.items():
-            print(">>> Clicks: %s -> %s" % (url, count))
+            print('>>> Clicks: %s -> %s' % (url, count))
 
 Registering the click is done as follows:
 
-    >>> count_click.delay(url="http://example.com")
+    >>> count_click.delay(url='http://example.com')
 
 .. warning::
 
@@ -81,7 +81,7 @@ def apply_batches_task(task, args, loglevel, logfile):
         result = task(*args)
     except Exception, exc:
         result = None
-        task.logger.error("Error: %r", exc, exc_info=True)
+        task.logger.error('Error: %r', exc, exc_info=True)
     finally:
         task.pop_request()
     return result
@@ -139,7 +139,7 @@ class Batches(Task):
         self._logging = None
 
     def run(self, requests):
-        raise NotImplementedError("%r must implement run(requests)" % (self, ))
+        raise NotImplementedError('%r must implement run(requests)' % (self, ))
 
     def flush(self, requests):
         return self.apply_buffer(requests, ([SimpleRequest.from_request(r)
@@ -162,15 +162,15 @@ class Batches(Task):
             self._do_flush()
 
     def _do_flush(self):
-        logger.debug("Batches: Wake-up to flush buffer...")
+        logger.debug('Batches: Wake-up to flush buffer...')
         requests = None
         if self._buffer.qsize():
             requests = list(consume_queue(self._buffer))
             if requests:
-                logger.debug("Batches: Buffer complete: %s", len(requests))
+                logger.debug('Batches: Buffer complete: %s', len(requests))
                 self.flush(requests)
         if not requests:
-            logger.debug("Batches: Cancelling timer: Nothing in buffer.")
+            logger.debug('Batches: Cancelling timer: Nothing in buffer.')
             self._tref.cancel()  # cancel timer.
             self._tref = None
 

+ 31 - 31
celery/contrib/bundles.py

@@ -12,38 +12,38 @@ from celery import VERSION
 from bundle.extensions import Dist
 
 
-defaults = {"author": "Celery Project",
-            "author_email": "bundles@celeryproject.org",
-            "url": "http://celeryproject.org",
-            "license": "BSD"}
-celery = Dist("celery", VERSION, **defaults)
-django_celery = Dist("django-celery", VERSION, **defaults)
-flask_celery = Dist("Flask-Celery", VERSION, **defaults)
+defaults = {'author': 'Celery Project',
+            'author_email': 'bundles@celeryproject.org',
+            'url': 'http://celeryproject.org',
+            'license': 'BSD'}
+celery = Dist('celery', VERSION, **defaults)
+django_celery = Dist('django-celery', VERSION, **defaults)
+flask_celery = Dist('Flask-Celery', VERSION, **defaults)
 
 bundles = [
-    celery.Bundle("celery-with-redis",
-        "Bundle installing the dependencies for Celery and Redis",
-        requires=["redis>=2.4.4"]),
-    celery.Bundle("celery-with-mongodb",
-        "Bundle installing the dependencies for Celery and MongoDB",
-        requires=["pymongo"]),
-    celery.Bundle("celery-with-couchdb",
-        "Bundle installing the dependencies for Celery and CouchDB",
-        requires=["couchdb"]),
-    celery.Bundle("celery-with-beanstalk",
-        "Bundle installing the dependencies for Celery and Beanstalk",
-        requires=["beanstalkc"]),
+    celery.Bundle('celery-with-redis',
+        'Bundle installing the dependencies for Celery and Redis',
+        requires=['redis>=2.4.4']),
+    celery.Bundle('celery-with-mongodb',
+        'Bundle installing the dependencies for Celery and MongoDB',
+        requires=['pymongo']),
+    celery.Bundle('celery-with-couchdb',
+        'Bundle installing the dependencies for Celery and CouchDB',
+        requires=['couchdb']),
+    celery.Bundle('celery-with-beanstalk',
+        'Bundle installing the dependencies for Celery and Beanstalk',
+        requires=['beanstalkc']),
 
-    django_celery.Bundle("django-celery-with-redis",
-        "Bundle installing the dependencies for Django-Celery and Redis",
-        requires=["redis>=2.4.4"]),
-    django_celery.Bundle("django-celery-with-mongodb",
-        "Bundle installing the dependencies for Django-Celery and MongoDB",
-        requires=["pymongo"]),
-    django_celery.Bundle("django-celery-with-couchdb",
-        "Bundle installing the dependencies for Django-Celery and CouchDB",
-        requires=["couchdb"]),
-    django_celery.Bundle("django-celery-with-beanstalk",
-        "Bundle installing the dependencies for Django-Celery and Beanstalk",
-        requires=["beanstalkc"]),
+    django_celery.Bundle('django-celery-with-redis',
+        'Bundle installing the dependencies for Django-Celery and Redis',
+        requires=['redis>=2.4.4']),
+    django_celery.Bundle('django-celery-with-mongodb',
+        'Bundle installing the dependencies for Django-Celery and MongoDB',
+        requires=['pymongo']),
+    django_celery.Bundle('django-celery-with-couchdb',
+        'Bundle installing the dependencies for Django-Celery and CouchDB',
+        requires=['couchdb']),
+    django_celery.Bundle('django-celery-with-beanstalk',
+        'Bundle installing the dependencies for Django-Celery and Beanstalk',
+        requires=['beanstalkc']),
 ]

+ 2 - 2
celery/contrib/methods.py

@@ -58,12 +58,12 @@ Caveats
     .. code-block:: python
 
         class A(object):
-            @task(name="A.add")
+            @task(name='A.add')
             def add(self, x, y):
                 return x + y
 
         class B(object):
-            @task(name="B.add")
+            @task(name='B.add')
             def add(self, x, y):
                 return x + y
 

+ 7 - 7
celery/contrib/migrate.py

@@ -27,14 +27,14 @@ class State(object):
     @property
     def strtotal(self):
         if not self.total_apx:
-            return u"?"
+            return u'?'
         return unicode(self.total_apx)
 
 
 def migrate_task(producer, body_, message,
-        remove_props=["application_headers",
-                      "content_type",
-                      "content_encoding"]):
+        remove_props=['application_headers',
+                      'content_type',
+                      'content_encoding']):
     body = ensure_bytes(message.body)  # use raw message body.
     info, headers, props = (message.delivery_info,
                             message.headers,
@@ -42,13 +42,13 @@ def migrate_task(producer, body_, message,
     ctype, enc = message.content_type, message.content_encoding
     # remove compression header, as this will be inserted again
     # when the message is recompressed.
-    compression = headers.pop("compression", None)
+    compression = headers.pop('compression', None)
 
     for key in remove_props:
         props.pop(key, None)
 
-    producer.publish(ensure_bytes(body), exchange=info["exchange"],
-                           routing_key=info["routing_key"],
+    producer.publish(ensure_bytes(body), exchange=info['exchange'],
+                           routing_key=info['routing_key'],
                            compression=compression,
                            headers=headers,
                            content_type=ctype,
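
Reviewer note: migrate_task above is the per-message hook; the module-level migrate_tasks() drives it across two connections. A sketch (broker URLs are assumptions):

    from kombu import BrokerConnection
    from celery.contrib.migrate import migrate_tasks

    migrate_tasks(BrokerConnection('amqp://guest@old-host//'),
                  BrokerConnection('amqp://guest@new-host//'))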

+ 17 - 17
celery/contrib/rdb.py

@@ -47,17 +47,17 @@ from billiard import current_process
 
 default_port = 6899
 
-CELERY_RDB_HOST = os.environ.get("CELERY_RDB_HOST") or "127.0.0.1"
-CELERY_RDB_PORT = int(os.environ.get("CELERY_RDB_PORT") or default_port)
+CELERY_RDB_HOST = os.environ.get('CELERY_RDB_HOST') or '127.0.0.1'
+CELERY_RDB_PORT = int(os.environ.get('CELERY_RDB_PORT') or default_port)
 
 #: Holds the currently active debugger.
 _current = [None]
 
-_frame = getattr(sys, "_getframe")
+_frame = getattr(sys, '_getframe')
 
 
 class Rdb(Pdb):
-    me = "Remote Debugger"
+    me = 'Remote Debugger'
     _prev_outs = None
     _sock = None
 
@@ -71,17 +71,17 @@ class Rdb(Pdb):
         self._sock, this_port = self.get_avail_port(host, port,
             port_search_limit, port_skew)
         self._sock.listen(1)
-        me = "%s:%s" % (self.me, this_port)
-        context = self.context = {"me": me, "host": host, "port": this_port}
-        self.say("%(me)s: Please telnet %(host)s %(port)s."
-                 "  Type `exit` in session to continue." % context)
-        self.say("%(me)s: Waiting for client..." % context)
+        me = '%s:%s' % (self.me, this_port)
+        context = self.context = {'me': me, 'host': host, 'port': this_port}
+        self.say('%(me)s: Please telnet %(host)s %(port)s.'
+                 '  Type `exit` in session to continue.' % context)
+        self.say('%(me)s: Waiting for client...' % context)
 
         self._client, address = self._sock.accept()
-        context["remote_addr"] = ":".join(map(str, address))
-        self.say("%(me)s: In session with %(remote_addr)s" % context)
-        self._handle = sys.stdin = sys.stdout = self._client.makefile("rw")
-        Pdb.__init__(self, completekey="tab",
+        context['remote_addr'] = ':'.join(map(str, address))
+        self.say('%(me)s: In session with %(remote_addr)s' % context)
+        self._handle = sys.stdin = sys.stdout = self._client.makefile('rw')
+        Pdb.__init__(self, completekey='tab',
                            stdin=self._handle, stdout=self._handle)
 
     def get_avail_port(self, host, port, search_limit=100, skew=+0):
@@ -104,11 +104,11 @@ class Rdb(Pdb):
                 return _sock, this_port
         else:
             raise Exception(
-                "%s: Could not find available port. Please set using "
-                "environment variable CELERY_RDB_PORT" % (self.me, ))
+                '%s: Could not find available port. Please set using '
+                'environment variable CELERY_RDB_PORT' % (self.me, ))
 
     def say(self, m):
-        self.out.write(m + "\n")
+        self.out.write(m + '\n')
 
     def _close_session(self):
         self.stdin, self.stdout = sys.stdin, sys.stdout = self._prev_handles
@@ -116,7 +116,7 @@ class Rdb(Pdb):
         self._client.close()
         self._sock.close()
         self.active = False
-        self.say("%(me)s: Session %(remote_addr)s ended." % self.context)
+        self.say('%(me)s: Session %(remote_addr)s ended.' % self.context)
 
     def do_continue(self, arg):
         self._close_session()
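
Reviewer note: usage is unchanged by this hunk: drop set_trace() into a task and telnet to the advertised port (host and port come from the CELERY_RDB_* variables above):

    from celery.task import task
    from celery.contrib import rdb

    @task()
    def buggy(x):
        result = x * 2
        rdb.set_trace()   # prints 'Please telnet 127.0.0.1 68xx ...'
        return result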

+ 7 - 7
celery/datastructures.py

@@ -158,19 +158,19 @@ class DependencyGraph(object):
 
         return result
 
-    def to_dot(self, fh, ws=" " * 4):
+    def to_dot(self, fh, ws=' ' * 4):
         """Convert the graph to DOT format.
 
         :param fh: A file, or a file-like object to write the graph to.
 
         """
-        fh.write("digraph dependencies {\n")
+        fh.write('digraph dependencies {\n')
         for obj, adjacent in self.iteritems():
             if not adjacent:
                 fh.write(ws + '"%s"\n' % (obj, ))
             for req in adjacent:
                 fh.write(ws + '"%s" -> "%s"\n' % (obj, req))
-        fh.write("}\n")
+        fh.write('}\n')
 
     def __iter__(self):
         return self.adjacent.iterkeys()
@@ -192,10 +192,10 @@ class DependencyGraph(object):
         return '\n'.join(self.repr_node(N) for N in self)
 
     def repr_node(self, obj, level=1):
-        output = ["%s(%s)" % (obj, self.valency_of(obj))]
+        output = ['%s(%s)' % (obj, self.valency_of(obj))]
         if obj in self:
             for other in self[obj]:
-                d = "%s(%s)" % (other, self.valency_of(other))
+                d = '%s(%s)' % (other, self.valency_of(other))
                 output.append('     ' * level + d)
                 output.extend(self.repr_node(other, level + 1).split('\n')[1:])
         return '\n'.join(output)
@@ -383,7 +383,7 @@ class LimitedSet(object):
     :keyword expires: Time in seconds, before a membership expires.
 
     """
-    __slots__ = ("maxlen", "expires", "_data", "__len__")
+    __slots__ = ('maxlen', 'expires', '_data', '__len__')
 
     def __init__(self, maxlen=None, expires=None):
         self.maxlen = maxlen
@@ -433,7 +433,7 @@ class LimitedSet(object):
         return iter(self._data)
 
     def __repr__(self):
-        return "LimitedSet(%r)" % (self._data.keys(), )
+        return 'LimitedSet(%r)' % (self._data.keys(), )
 
     @property
     def chronologically(self):
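
Reviewer note: LimitedSet (bottom of this hunk) caps membership by count and age. A sketch of the eviction behaviour when only maxlen is set (semantics as read from this version's add()):

    from celery.datastructures import LimitedSet

    revoked = LimitedSet(maxlen=4)
    for uuid in 'abcd':
        revoked.add(uuid)
    revoked.add('e')   # at maxlen, so the oldest member ('a') is popped
    print('a' in revoked, 'e' in revoked)   # expected: False True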

+ 16 - 16
celery/events/__init__.py

@@ -27,14 +27,14 @@ from kombu.utils import cached_property
 from celery.app import app_or_default
 from celery.utils import uuid
 
-event_exchange = Exchange("celeryev", type="topic")
+event_exchange = Exchange('celeryev', type='topic')
 
 
 def get_exchange(conn):
     ex = copy(event_exchange)
-    if conn.transport.driver_type == "redis":
+    if conn.transport.driver_type == 'redis':
         # quick hack for Issue #436
-        ex.type = "fanout"
+        ex.type = 'fanout'
     return ex
 
 
@@ -45,8 +45,8 @@ def Event(type, _fields=None, **fields):
 
     """
     event = dict(_fields or {}, type=type, **fields)
-    if "timestamp" not in event:
-        event["timestamp"] = time.time()
+    if 'timestamp' not in event:
+        event['timestamp'] = time.time()
     return event
 
 
@@ -133,7 +133,7 @@ class EventDispatcher(object):
                                     clock=self.app.clock.forward(), **fields)
                 try:
                     self.publisher.publish(event,
-                                           routing_key=type.replace("-", "."))
+                                           routing_key=type.replace('-', '.'))
                 except Exception, exc:
                     if not self.buffer_while_offline:
                         raise
@@ -169,8 +169,8 @@ class EventReceiver(object):
     """
     handlers = {}
 
-    def __init__(self, connection, handlers=None, routing_key="#",
-            node_id=None, app=None, queue_prefix="celeryev"):
+    def __init__(self, connection, handlers=None, routing_key='#',
+            node_id=None, app=None, queue_prefix='celeryev'):
         self.app = app_or_default(app)
         self.connection = connection
         if handlers is not None:
@@ -190,7 +190,7 @@ class EventReceiver(object):
     def process(self, type, event):
         """Process the received event by dispatching it to the appropriate
         handler."""
-        handler = self.handlers.get(type) or self.handlers.get("*")
+        handler = self.handlers.get(type) or self.handlers.get('*')
         handler and handler(event)
 
     @contextmanager
@@ -219,7 +219,7 @@ class EventReceiver(object):
         list(self.itercapture(limit=limit, timeout=timeout, wakeup=wakeup))
 
     def wakeup_workers(self, channel=None):
-        self.app.control.broadcast("heartbeat",
+        self.app.control.broadcast('heartbeat',
                                    connection=self.connection,
                                    channel=channel)
 
@@ -228,8 +228,8 @@ class EventReceiver(object):
             pass
 
     def _receive(self, body, message):
-        type = body.pop("type").lower()
-        clock = body.get("clock")
+        type = body.pop('type').lower()
+        clock = body.get('clock')
         if clock:
             self.app.clock.adjust(clock)
         self.process(type, Event(type, body))
@@ -243,17 +243,17 @@ class Events(object):
     @cached_property
     def Receiver(self):
         return self.app.subclass_with_self(EventReceiver,
-                                           reverse="events.Receiver")
+                                           reverse='events.Receiver')
 
     @cached_property
     def Dispatcher(self):
         return self.app.subclass_with_self(EventDispatcher,
-                                           reverse="events.Dispatcher")
+                                           reverse='events.Dispatcher')
 
     @cached_property
     def State(self):
-        return self.app.subclass_with_self("celery.events.state:State",
-                                           reverse="events.State")
+        return self.app.subclass_with_self('celery.events.state:State',
+                                           reverse='events.State')
 
     @contextmanager
     def default_dispatcher(self, hostname=None, enabled=True,
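
Reviewer note: the Receiver/Dispatcher pair above is the public entry point for event consumption; a minimal capture loop (the broker URL is an assumption):

    from celery import Celery

    app = Celery(broker='amqp://guest@localhost//')   # assumed broker

    def on_event(event):
        print('%s from %s' % (event['type'], event.get('hostname')))

    with app.broker_connection() as conn:
        recv = app.events.Receiver(conn, handlers={'*': on_event})
        recv.capture(limit=10)   # handle ten events, then return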

+ 63 - 63
celery/events/cursesmon.py

@@ -43,26 +43,26 @@ class CursesMonitor(object):  # pragma: no cover
     screen_delay = 10
     selected_task = None
     selected_position = 0
-    selected_str = "Selected: "
+    selected_str = 'Selected: '
     foreground = curses.COLOR_BLACK
     background = curses.COLOR_WHITE
-    online_str = "Workers online: "
-    help_title = "Keys: "
-    help = ("j:up k:down i:info t:traceback r:result c:revoke ^c: quit")
-    greet = "celeryev %s" % __version__
-    info_str = "Info: "
+    online_str = 'Workers online: '
+    help_title = 'Keys: '
+    help = ('j:up k:down i:info t:traceback r:result c:revoke ^c: quit')
+    greet = 'celeryev %s' % __version__
+    info_str = 'Info: '
 
     def __init__(self, state, keymap=None, app=None):
         self.app = app_or_default(app)
         self.keymap = keymap or self.keymap
         self.state = state
-        default_keymap = {"J": self.move_selection_down,
-                          "K": self.move_selection_up,
-                          "C": self.revoke_selection,
-                          "T": self.selection_traceback,
-                          "R": self.selection_result,
-                          "I": self.selection_info,
-                          "L": self.selection_rate_limit}
+        default_keymap = {'J': self.move_selection_down,
+                          'K': self.move_selection_up,
+                          'C': self.revoke_selection,
+                          'T': self.selection_traceback,
+                          'R': self.selection_result,
+                          'I': self.selection_info,
+                          'L': self.selection_rate_limit}
         self.keymap = dict(default_keymap, **self.keymap)
 
     def format_row(self, uuid, task, worker, timestamp, state):
@@ -87,7 +87,7 @@ class CursesMonitor(object):  # pragma: no cover
         state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
         timestamp = timestamp.ljust(TIMESTAMP_WIDTH)
 
-        row = "%s %s %s %s %s " % (uuid, worker, task, timestamp, state)
+        row = '%s %s %s %s %s ' % (uuid, worker, task, timestamp, state)
         if self.screen_width is None:
             self.screen_width = len(row[:mx])
         return row[:mx]
@@ -139,9 +139,9 @@ class CursesMonitor(object):  # pragma: no cover
         except IndexError:
             self.selected_task = self.tasks[0][0]
 
-    keyalias = {curses.KEY_DOWN: "J",
-                curses.KEY_UP: "K",
-                curses.KEY_ENTER: "I"}
+    keyalias = {curses.KEY_DOWN: 'J',
+                curses.KEY_UP: 'K',
+                curses.KEY_ENTER: 'I'}
 
     def handle_keypress(self):
         try:
@@ -161,7 +161,7 @@ class CursesMonitor(object):  # pragma: no cover
             self.win.addstr(y(), 3, title, curses.A_BOLD | curses.A_UNDERLINE)
             blank_line()
         callback(my, mx, y())
-        self.win.addstr(my - 1, 0, "Press any key to continue...",
+        self.win.addstr(my - 1, 0, 'Press any key to continue...',
                         curses.A_BOLD)
         self.win.refresh()
         while 1:
@@ -178,9 +178,9 @@ class CursesMonitor(object):  # pragma: no cover
             return curses.beep()
 
         my, mx = self.win.getmaxyx()
-        r = "New rate limit: "
+        r = 'New rate limit: '
         self.win.addstr(my - 2, 3, r, curses.A_BOLD | curses.A_UNDERLINE)
-        self.win.addstr(my - 2, len(r) + 3, " " * (mx - len(r)))
+        self.win.addstr(my - 2, len(r) + 3, ' ' * (mx - len(r)))
         rlimit = self.readline(my - 2, 3 + len(r))
 
         if rlimit:
@@ -193,7 +193,7 @@ class CursesMonitor(object):  # pragma: no cover
         def callback(my, mx, xs):
             y = count(xs).next
             if not reply:
-                self.win.addstr(y(), 3, "No replies received in 1s deadline.",
+                self.win.addstr(y(), 3, 'No replies received in 1s deadline.',
                         curses.A_BOLD + curses.color_pair(2))
                 return
 
@@ -201,19 +201,19 @@ class CursesMonitor(object):  # pragma: no cover
                 curline = y()
 
                 host, response = subreply.items()[0]
-                host = "%s: " % host
+                host = '%s: ' % host
                 self.win.addstr(curline, 3, host, curses.A_BOLD)
                 attr = curses.A_NORMAL
-                text = ""
-                if "error" in response:
-                    text = response["error"]
+                text = ''
+                if 'error' in response:
+                    text = response['error']
                     attr |= curses.color_pair(2)
-                elif "ok" in response:
-                    text = response["ok"]
+                elif 'ok' in response:
+                    text = response['ok']
                     attr |= curses.color_pair(3)
                 self.win.addstr(curline, 3 + len(host), text, attr)
 
-        return self.alert(callback, "Remote Control Command Replies")
+        return self.alert(callback, 'Remote Control Command Replies')
 
     def readline(self, x, y):
         buffer = str()
@@ -248,15 +248,15 @@ class CursesMonitor(object):  # pragma: no cover
             my, mx = self.win.getmaxyx()
             y = count(xs).next
             task = self.state.tasks[self.selected_task]
-            info = task.info(extra=["state"])
-            infoitems = [("args", info.pop("args", None)),
-                         ("kwargs", info.pop("kwargs", None))] + info.items()
+            info = task.info(extra=['state'])
+            infoitems = [('args', info.pop('args', None)),
+                         ('kwargs', info.pop('kwargs', None))] + info.items()
             for key, value in infoitems:
                 if key is None:
                     continue
                 value = str(value)
                 curline = y()
-                keys = key + ": "
+                keys = key + ': '
                 self.win.addstr(curline, 3, keys, curses.A_BOLD)
                 wrapped = wrap(value, mx - 2)
                 if len(wrapped) == 1:
@@ -267,15 +267,15 @@ class CursesMonitor(object):  # pragma: no cover
                     for subline in wrapped:
                         nexty = y()
                         if nexty >= my - 1:
-                            subline = " " * 4 + "[...]"
+                            subline = ' ' * 4 + '[...]'
                         elif nexty >= my:
                             break
                         self.win.addstr(nexty, 3,
-                                abbr(" " * 4 + subline, self.screen_width - 4),
+                                abbr(' ' * 4 + subline, self.screen_width - 4),
                                 curses.A_NORMAL)
 
         return self.alert(alert_callback,
-                "Task details for %s" % self.selected_task)
+                'Task details for %s' % self.selected_task)
 
     def selection_traceback(self):
         if not self.selected_task:
@@ -286,11 +286,11 @@ class CursesMonitor(object):  # pragma: no cover
 
         def alert_callback(my, mx, xs):
             y = count(xs).next
-            for line in task.traceback.split("\n"):
+            for line in task.traceback.split('\n'):
                 self.win.addstr(y(), 3, line)
 
         return self.alert(alert_callback,
-                "Task Exception Traceback for %s" % self.selected_task)
+                'Task Exception Traceback for %s' % self.selected_task)
 
     def selection_result(self):
         if not self.selected_task:
@@ -299,13 +299,13 @@ class CursesMonitor(object):  # pragma: no cover
         def alert_callback(my, mx, xs):
             y = count(xs).next
             task = self.state.tasks[self.selected_task]
-            result = getattr(task, "result", None) or getattr(task,
-                    "exception", None)
+            result = getattr(task, 'result', None) or getattr(task,
+                    'exception', None)
             for line in wrap(result, mx - 2):
                 self.win.addstr(y(), 3, line)
 
         return self.alert(alert_callback,
-                "Task Result for %s" % self.selected_task)
+                'Task Result for %s' % self.selected_task)
 
     def display_task_row(self, lineno, task):
         state_color = self.state_colors.get(task.state)
@@ -314,7 +314,7 @@ class CursesMonitor(object):  # pragma: no cover
             attr = curses.A_STANDOUT
         timestamp = datetime.utcfromtimestamp(
                         task.timestamp or time.time())
-        timef = timestamp.strftime("%H:%M:%S")
+        timef = timestamp.strftime('%H:%M:%S')
         hostname = task.worker.hostname if task.worker else '*NONE*'
         line = self.format_row(task.uuid, task.name,
                                hostname,
@@ -333,12 +333,12 @@ class CursesMonitor(object):  # pragma: no cover
         y = blank_line = count(2).next
         my, mx = win.getmaxyx()
         win.erase()
-        win.bkgd(" ", curses.color_pair(1))
+        win.bkgd(' ', curses.color_pair(1))
         win.border()
         win.addstr(1, x, self.greet, curses.A_DIM | curses.color_pair(5))
         blank_line()
-        win.addstr(y(), x, self.format_row("UUID", "TASK",
-                                           "WORKER", "TIME", "STATE"),
+        win.addstr(y(), x, self.format_row('UUID', 'TASK',
+                                           'WORKER', 'TIME', 'STATE'),
                 curses.A_BOLD | curses.A_UNDERLINE)
         tasks = self.tasks
         if tasks:
@@ -357,22 +357,22 @@ class CursesMonitor(object):  # pragma: no cover
         # Selected Task Info
         if self.selected_task:
             win.addstr(my - 5, x, self.selected_str, curses.A_BOLD)
-            info = "Missing extended info"
-            detail = ""
+            info = 'Missing extended info'
+            detail = ''
             try:
                 selection = self.state.tasks[self.selected_task]
             except KeyError:
                 pass
             else:
-                info = selection.info(["args", "kwargs",
-                                       "result", "runtime", "eta"])
-                if "runtime" in info:
-                    info["runtime"] = "%.2fs" % info["runtime"]
-                if "result" in info:
-                    info["result"] = abbr(info["result"], 16)
-                info = " ".join("%s=%s" % (key, value)
+                info = selection.info(['args', 'kwargs',
+                                       'result', 'runtime', 'eta'])
+                if 'runtime' in info:
+                    info['runtime'] = '%.2fs' % info['runtime']
+                if 'result' in info:
+                    info['result'] = abbr(info['result'], 16)
+                info = ' '.join('%s=%s' % (key, value)
                             for key, value in info.items())
-                detail = "... -> key i"
+                detail = '... -> key i'
             infowin = abbr(info,
                            self.screen_width - len(self.selected_str) - 2,
                            detail)
@@ -383,20 +383,20 @@ class CursesMonitor(object):  # pragma: no cover
                 win.addstr(my - 5, x + len(self.selected_str) + detailpos,
                         detail, curses.A_BOLD)
         else:
-            win.addstr(my - 5, x, "No task selected", curses.A_NORMAL)
+            win.addstr(my - 5, x, 'No task selected', curses.A_NORMAL)
 
         # Workers
         if self.workers:
             win.addstr(my - 4, x, self.online_str, curses.A_BOLD)
             win.addstr(my - 4, x + len(self.online_str),
-                    ", ".join(sorted(self.workers)), curses.A_NORMAL)
+                    ', '.join(sorted(self.workers)), curses.A_NORMAL)
         else:
-            win.addstr(my - 4, x, "No workers discovered.")
+            win.addstr(my - 4, x, 'No workers discovered.')
 
         # Info
         win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
         win.addstr(my - 3, x + len(self.info_str),
-                "events:%s tasks:%s workers:%s/%s" % (
+                'events:%s tasks:%s workers:%s/%s' % (
                     self.state.event_count, self.state.task_count,
                     len([w for w in self.state.workers.values()
                             if w.alive]),
@@ -475,22 +475,22 @@ class DisplayThread(threading.Thread):  # pragma: no cover
 def capture_events(app, state, display):  # pragma: no cover
 
     def on_connection_error(exc, interval):
-        sys.stderr.write("Connection Error: %r. Retry in %ss." % (
+        sys.stderr.write('Connection Error: %r. Retry in %ss.' % (
             exc, interval))
 
     while 1:
-        sys.stderr.write("-> evtop: starting capture...\n")
+        sys.stderr.write('-> evtop: starting capture...\n')
         with app.broker_connection() as conn:
             try:
                 conn.ensure_connection(on_connection_error,
                                        app.conf.BROKER_CONNECTION_MAX_RETRIES)
-                recv = app.events.Receiver(conn, handlers={"*": state.event})
+                recv = app.events.Receiver(conn, handlers={'*': state.event})
                 display.resetscreen()
                 display.init_screen()
                 with recv.consumer():
                     recv.drain_events(timeout=1, ignore_timeouts=True)
             except (conn.connection_errors, conn.channel_errors), exc:
-                sys.stderr.write("Connection lost: %r" % (exc, ))
+                sys.stderr.write('Connection lost: %r' % (exc, ))
 
 
 def evtop(app=None):  # pragma: no cover
@@ -513,5 +513,5 @@ def evtop(app=None):  # pragma: no cover
         display.resetscreen()
 
 
-if __name__ == "__main__":  # pragma: no cover
+if __name__ == '__main__':  # pragma: no cover
     evtop()
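
Both monitors above share one consume pattern: open a broker connection,
build an events ``Receiver`` with a catch-all handler, and drain events.
A minimal standalone sketch of that pattern, assuming the default app is
configured with a broker (the handler body and the ``limit`` value are
illustrative)::

    from celery.state import current_app as app

    def on_event(event):
        print('%s from %s' % (event['type'], event.get('hostname')))

    with app.broker_connection() as conn:
        recv = app.events.Receiver(conn, handlers={'*': on_event})
        recv.capture(limit=10)  # handle ten events, then return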

+ 25 - 25
celery/events/dumper.py

@@ -19,20 +19,20 @@ from celery.datastructures import LRUCache
 
 TASK_NAMES = LRUCache(limit=0xFFF)
 
-HUMAN_TYPES = {"worker-offline": "shutdown",
-               "worker-online": "started",
-               "worker-heartbeat": "heartbeat"}
+HUMAN_TYPES = {'worker-offline': 'shutdown',
+               'worker-online': 'started',
+               'worker-heartbeat': 'heartbeat'}
 
 
 def humanize_type(type):
     try:
         return HUMAN_TYPES[type.lower()]
     except KeyError:
-        return type.lower().replace("-", " ")
+        return type.lower().replace('-', ' ')
 
 
 def say(msg, out=sys.stdout):
-    out.write(msg + "\n")
+    out.write(msg + '\n')
 
 
 class Dumper(object):
@@ -44,44 +44,44 @@ class Dumper(object):
         say(msg, out=self.out)
 
     def on_event(self, event):
-        timestamp = datetime.utcfromtimestamp(event.pop("timestamp"))
-        type = event.pop("type").lower()
-        hostname = event.pop("hostname")
-        if type.startswith("task-"):
-            uuid = event.pop("uuid")
-            if type in ("task-received", "task-sent"):
-                task = TASK_NAMES[uuid] = "%s(%s) args=%s kwargs=%s" % (
-                        event.pop("name"), uuid,
-                        event.pop("args"),
-                        event.pop("kwargs"))
+        timestamp = datetime.utcfromtimestamp(event.pop('timestamp'))
+        type = event.pop('type').lower()
+        hostname = event.pop('hostname')
+        if type.startswith('task-'):
+            uuid = event.pop('uuid')
+            if type in ('task-received', 'task-sent'):
+                task = TASK_NAMES[uuid] = '%s(%s) args=%s kwargs=%s' % (
+                        event.pop('name'), uuid,
+                        event.pop('args'),
+                        event.pop('kwargs'))
             else:
-                task = TASK_NAMES.get(uuid, "")
+                task = TASK_NAMES.get(uuid, '')
             return self.format_task_event(hostname, timestamp,
                                           type, task, event)
-        fields = ", ".join("%s=%s" % (key, event[key])
+        fields = ', '.join('%s=%s' % (key, event[key])
                         for key in sorted(event.keys()))
-        sep = fields and ":" or ""
-        self.say("%s [%s] %s%s %s" % (hostname, timestamp,
+        sep = fields and ':' or ''
+        self.say('%s [%s] %s%s %s' % (hostname, timestamp,
                                       humanize_type(type), sep, fields))
 
     def format_task_event(self, hostname, timestamp, type, task, event):
-        fields = ", ".join("%s=%s" % (key, event[key])
+        fields = ', '.join('%s=%s' % (key, event[key])
                         for key in sorted(event.keys()))
-        sep = fields and ":" or ""
-        self.say("%s [%s] %s%s %s %s" % (hostname, timestamp,
+        sep = fields and ':' or ''
+        self.say('%s [%s] %s%s %s %s' % (hostname, timestamp,
                     humanize_type(type), sep, task, fields))
 
 
 def evdump(app=None, out=sys.stdout):
     app = app_or_default(app)
     dumper = Dumper(out=out)
-    dumper.say("-> evdump: starting capture...")
+    dumper.say('-> evdump: starting capture...')
     conn = app.broker_connection()
-    recv = app.events.Receiver(conn, handlers={"*": dumper.on_event})
+    recv = app.events.Receiver(conn, handlers={'*': dumper.on_event})
     try:
         recv.capture()
     except (KeyboardInterrupt, SystemExit):
         conn and conn.close()
 
-if __name__ == "__main__":  # pragma: no cover
+if __name__ == '__main__':  # pragma: no cover
     evdump()
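
``evdump`` can also be invoked directly from Python; it blocks and prints
events for the default (or given) app until interrupted::

    from celery.events.dumper import evdump

    evdump()  # Ctrl-C to stop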

+ 6 - 6
celery/events/snapshot.py

@@ -22,12 +22,12 @@ from celery.utils.imports import instantiate
 from celery.utils.log import get_logger
 from celery.utils.timeutils import rate
 
-logger = get_logger("celery.evcam")
+logger = get_logger('celery.evcam')
 
 
 class Polaroid(object):
     timer = timer2
-    shutter_signal = Signal(providing_args=("state", ))
+    shutter_signal = Signal(providing_args=('state', ))
     cleanup_signal = Signal()
     clear_after = False
 
@@ -57,13 +57,13 @@ class Polaroid(object):
         pass
 
     def cleanup(self):
-        logger.debug("Cleanup: Running...")
+        logger.debug('Cleanup: Running...')
         self.cleanup_signal.send(None)
         self.on_cleanup()
 
     def shutter(self):
         if self.maxrate is None or self.maxrate.can_consume():
-            logger.debug("Shutter: %s", self.state)
+            logger.debug('Shutter: %s', self.state)
             self.shutter_signal.send(self.state)
             self.on_shutter(self.state)
 
@@ -95,14 +95,14 @@ def evcam(camera, freq=1.0, maxrate=None, loglevel=0,
     app.log.setup_logging_subsystem(loglevel, logfile)
 
     logger.info(
-        "-> evcam: Taking snapshots with %s (every %s secs.)\n" % (
+        '-> evcam: Taking snapshots with %s (every %s secs.)\n' % (
             camera, freq))
     state = app.events.State()
     cam = instantiate(camera, state, app=app, freq=freq,
                       maxrate=maxrate, timer=timer)
     cam.install()
     conn = app.broker_connection()
-    recv = app.events.Receiver(conn, handlers={"*": state.event})
+    recv = app.events.Receiver(conn, handlers={'*': state.event})
     try:
         try:
             recv.capture(limit=None)
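
A custom camera is simply a ``Polaroid`` subclass overriding ``on_shutter``;
a minimal sketch (the ``DumpCam`` name and its module path are hypothetical)::

    from celery.events.snapshot import Polaroid

    class DumpCam(Polaroid):
        clear_after = True  # clear in-memory state after each snapshot

        def on_shutter(self, state):
            # `state` is the app.events.State() instance built by evcam.
            print('workers: %r' % (state.workers.keys(), ))
            print('tasks:   %r' % (state.tasks.keys(), ))

    # evcam('myapp.DumpCam', freq=2.0) would then poll it every two seconds.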

+ 20 - 20
celery/events/state.py

@@ -45,7 +45,7 @@ class Worker(Element):
     expire_window = HEARTBEAT_EXPIRE_WINDOW
 
     def __init__(self, **fields):
-        fields.setdefault("freq", 60)
+        fields.setdefault('freq', 60)
         super(Worker, self).__init__(**fields)
         self.heartbeats = []
 
@@ -71,8 +71,8 @@ class Worker(Element):
                 self.heartbeats = self.heartbeats[self.heartbeat_max:]
 
     def __repr__(self):
-        return "<Worker: %s (%s)" % (self.hostname,
-                                     self.alive and "ONLINE" or "OFFLINE")
+        return '<Worker: %s (%s)>' % (self.hostname,
+                                      self.alive and 'ONLINE' or 'OFFLINE')
 
     @property
     def heartbeat_expires(self):
@@ -91,15 +91,15 @@ class Task(Element):
     #: happened before a task-failed event).
     #:
     #: A merge rule consists of a state and a list of fields to keep from
-    #: that state. ``(RECEIVED, ("name", "args")``, means the name and args
+    #: that state. ``(RECEIVED, ('name', 'args'))``, means the name and args
     #: fields are always taken from the RECEIVED state, and any values for
     #: these fields received before or after are simply ignored.
-    merge_rules = {states.RECEIVED: ("name", "args", "kwargs",
-                                     "retries", "eta", "expires")}
+    merge_rules = {states.RECEIVED: ('name', 'args', 'kwargs',
+                                     'retries', 'eta', 'expires')}
 
     #: :meth:`info` displays these fields by default.
-    _info_fields = ("args", "kwargs", "retries", "result",
-                    "eta", "runtime", "expires", "exception")
+    _info_fields = ('args', 'kwargs', 'retries', 'result',
+                    'eta', 'runtime', 'expires', 'exception')
 
     #: Default values.
     _defaults = dict(uuid=None, name=None, state=states.PENDING,
@@ -186,7 +186,7 @@ class Task(Element):
                             if getattr(self, key, None) is not None)
 
     def __repr__(self):
-        return "<Task: %s(%s) %s>" % (self.name, self.uuid, self.state)
+        return '<Task: %s(%s) %s>' % (self.name, self.uuid, self.state)
 
     @property
     def ready(self):
@@ -203,12 +203,12 @@ class State(object):
         self.workers = LRUCache(limit=max_workers_in_memory)
         self.tasks = LRUCache(limit=max_tasks_in_memory)
         self.event_callback = callback
-        self.group_handlers = {"worker": self.worker_event,
-                               "task": self.task_event}
+        self.group_handlers = {'worker': self.worker_event,
+                               'task': self.task_event}
         self._mutex = Lock()
 
     def freeze_while(self, fun, *args, **kwargs):
-        clear_after = kwargs.pop("clear_after", False)
+        clear_after = kwargs.pop('clear_after', False)
         with self._mutex:
             try:
                 return fun(*args, **kwargs)
@@ -259,21 +259,21 @@ class State(object):
 
     def worker_event(self, type, fields):
         """Process worker event."""
-        hostname = fields.pop("hostname", None)
+        hostname = fields.pop('hostname', None)
         if hostname:
             worker = self.get_or_create_worker(hostname)
-            handler = getattr(worker, "on_%s" % type, None)
+            handler = getattr(worker, 'on_%s' % type, None)
             if handler:
                 handler(**fields)
 
     def task_event(self, type, fields):
         """Process task event."""
-        uuid = fields.pop("uuid")
-        hostname = fields.pop("hostname")
+        uuid = fields.pop('uuid')
+        hostname = fields.pop('hostname')
         worker = self.get_or_create_worker(hostname)
         task = self.get_or_create_task(uuid)
-        handler = getattr(task, "on_%s" % type, None)
-        if type == "received":
+        handler = getattr(task, 'on_%s' % type, None)
+        if type == 'received':
             self.task_count += 1
         if handler:
             handler(**fields)
@@ -288,7 +288,7 @@ class State(object):
     def _dispatch_event(self, event):
         self.event_count += 1
         event = kwdict(event)
-        group, _, type = event.pop("type").partition("-")
+        group, _, type = event.pop('type').partition('-')
         self.group_handlers[group](type, event)
         if self.event_callback:
             self.event_callback(self, event)
@@ -341,7 +341,7 @@ class State(object):
         return [w for w in self.workers.values() if w.alive]
 
     def __repr__(self):
-        return "<ClusterState: events=%s tasks=%s>" % (self.event_count,
+        return '<ClusterState: events=%s tasks=%s>' % (self.event_count,
                                                        self.task_count)
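
Raw event dicts can be fed straight into a ``State`` instance, exactly as
the receivers above do; a small sketch (the hostname and timestamp are
illustrative)::

    import time
    from celery.events.state import State

    state = State()
    state.event({'type': 'worker-online',
                 'hostname': 'w1.example.com',
                 'timestamp': time.time()})
    print(state)  # <ClusterState: events=1 tasks=0>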
 
 

+ 7 - 7
celery/loaders/__init__.py

@@ -13,9 +13,9 @@ from celery.state import current_app
 from celery.utils import deprecated
 from celery.utils.imports import symbol_by_name
 
-LOADER_ALIASES = {"app": "celery.loaders.app:AppLoader",
-                  "default": "celery.loaders.default:Loader",
-                  "django": "djcelery.loaders:DjangoLoader"}
+LOADER_ALIASES = {'app': 'celery.loaders.app:AppLoader',
+                  'default': 'celery.loaders.default:Loader',
+                  'django': 'djcelery.loaders:DjangoLoader'}
 
 
 def get_loader_cls(loader):
@@ -23,13 +23,13 @@ def get_loader_cls(loader):
     return symbol_by_name(loader, LOADER_ALIASES)
 
 
-@deprecated(deprecation="2.5", removal="3.0",
-        alternative="celery.current_app.loader")
+@deprecated(deprecation='2.5', removal='3.0',
+        alternative='celery.current_app.loader')
 def current_loader():
     return current_app.loader
 
 
-@deprecated(deprecation="2.5", removal="3.0",
-            alternative="celery.current_app.conf")
+@deprecated(deprecation='2.5', removal='3.0',
+            alternative='celery.current_app.conf')
 def load_settings():
     return current_app.conf

+ 12 - 12
celery/loaders/base.py

@@ -121,7 +121,7 @@ class BaseLoader(object):
     def config_from_object(self, obj, silent=False):
         if isinstance(obj, basestring):
             try:
-                if "." in obj:
+                if '.' in obj:
                     obj = symbol_by_name(obj, imp=self.import_from_cwd)
                 else:
                     obj = self.import_from_cwd(obj)
@@ -129,17 +129,17 @@ class BaseLoader(object):
                 if silent:
                     return False
                 raise
-        if not hasattr(obj, "__getitem__"):
+        if not hasattr(obj, '__getitem__'):
             obj = DictAttribute(obj)
         self._conf = obj
         return True
 
-    def cmdline_config_parser(self, args, namespace="celery",
-                re_type=re.compile(r"\((\w+)\)"),
-                extra_types={"json": anyjson.loads},
-                override_types={"tuple": "json",
-                                "list": "json",
-                                "dict": "json"}):
+    def cmdline_config_parser(self, args, namespace='celery',
+                re_type=re.compile(r'\((\w+)\)'),
+                extra_types={'json': anyjson.loads},
+                override_types={'tuple': 'json',
+                                'list': 'json',
+                                'dict': 'json'}):
         from celery.app.defaults import Option, NAMESPACES
         namespace = namespace.upper()
         typemap = dict(Option.typemap, **extra_types)
@@ -151,7 +151,7 @@ class BaseLoader(object):
             ## find key/value
             # ns.key=value|ns_key=value (case insensitive)
             key, value = arg.split('=', 1)
-            key = key.upper().replace(".", "_")
+            key = key.upper().replace('.', '_')
 
             ## find namespace.
             # .key=value|_key=value expands to default namespace.
@@ -161,7 +161,7 @@ class BaseLoader(object):
                 # find namespace part of key
                 ns, key = key.split('_', 1)
 
-            ns_key = (ns and ns + "_" or "") + key
+            ns_key = (ns and ns + '_' or '') + key
 
             # (type)value makes cast to custom type.
             cast = re_type.match(value)
@@ -175,7 +175,7 @@ class BaseLoader(object):
                     value = NAMESPACES[ns][key].to_python(value)
                 except ValueError, exc:
                     # display key name in error message.
-                    raise ValueError("%r: %s" % (ns_key, exc))
+                    raise ValueError('%r: %s' % (ns_key, exc))
             return ns_key, value
 
         return dict(map(getarg, args))
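
A worked example of the grammar handled above, assuming ``loader`` is a
loader instance (the keys and the resulting values are illustrative)::

    >>> loader.cmdline_config_parser(
    ...     ['broker.pool_limit=10',
    ...      'celery.annotations=(json){"*": {"rate_limit": "10/s"}}'])
    {'BROKER_POOL_LIMIT': 10,
     'CELERY_ANNOTATIONS': {'*': {'rate_limit': '10/s'}}}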
@@ -205,4 +205,4 @@ class BaseLoader(object):
 
     @cached_property
     def mail(self):
-        return self.import_module("celery.utils.mail")
+        return self.import_module('celery.utils.mail')

+ 10 - 10
celery/loaders/default.py

@@ -19,10 +19,10 @@ from celery.utils.imports import NotAPackage, find_module
 
 from .base import BaseLoader
 
-DEFAULT_CONFIG_MODULE = "celeryconfig"
+DEFAULT_CONFIG_MODULE = 'celeryconfig'
 
 #: Warns if the configuration file is missing when :envvar:`C_WNOCONF` is set.
-C_WNOCONF = strtobool(os.environ.get("C_WNOCONF", False))
+C_WNOCONF = strtobool(os.environ.get('C_WNOCONF', False))
 
 CONFIG_INVALID_NAME = """
 Error: Module '%(module)s' doesn't exist, or it's not a valid \
@@ -46,7 +46,7 @@ class Loader(BaseLoader):
     def read_configuration(self):
         """Read configuration from :file:`celeryconfig.py` and configure
         celery and Django so it can be used by regular Python."""
-        configname = os.environ.get("CELERY_CONFIG_MODULE",
+        configname = os.environ.get('CELERY_CONFIG_MODULE',
                                      DEFAULT_CONFIG_MODULE)
         try:
             self.find_module(configname)
@@ -54,17 +54,17 @@ class Loader(BaseLoader):
             if configname.endswith('.py'):
                 raise NotAPackage, NotAPackage(
                         CONFIG_WITH_SUFFIX % {
-                            "module": configname,
-                            "suggest": configname[:-3]}), sys.exc_info()[2]
+                            'module': configname,
+                            'suggest': configname[:-3]}), sys.exc_info()[2]
             raise NotAPackage, NotAPackage(
                     CONFIG_INVALID_NAME % {
-                        "module": configname}), sys.exc_info()[2]
+                        'module': configname}), sys.exc_info()[2]
         except ImportError:
             # billiard sets this if forked using execv
-            if C_WNOCONF and not os.environ.get("FORKED_BY_MULTIPROCESSING"):
+            if C_WNOCONF and not os.environ.get('FORKED_BY_MULTIPROCESSING'):
                 warnings.warn(NotConfigured(
-                    "No %r module found! Please make sure it exists and "
-                    "is available to Python." % (configname, )))
+                    'No %r module found! Please make sure it exists and '
+                    'is available to Python.' % (configname, )))
             return self.setup_settings({})
         else:
             celeryconfig = self.import_from_cwd(configname)
@@ -75,4 +75,4 @@ class Loader(BaseLoader):
             return self.setup_settings(usercfg)
 
     def wanted_module_item(self, item):
-        return item[0].isupper() and not item.startswith("_")
+        return item[0].isupper() and not item.startswith('_')

+ 6 - 6
celery/local.py

@@ -191,13 +191,13 @@ class PromiseProxy(Proxy):
 
     def _get_current_object(self):
         try:
-            return object.__getattribute__(self, "__thing")
+            return object.__getattribute__(self, '__thing')
         except AttributeError:
             return self.__evaluate__()
 
     def __evaluated__(self):
         try:
-            object.__getattribute__(self, "__thing")
+            object.__getattribute__(self, '__thing')
         except AttributeError:
             return False
         return True
@@ -208,12 +208,12 @@ class PromiseProxy(Proxy):
     def __evaluate__(self):
         try:
             thing = Proxy._get_current_object(self)
-            object.__setattr__(self, "__thing", thing)
+            object.__setattr__(self, '__thing', thing)
             return thing
         finally:
-            object.__delattr__(self, "_Proxy__local")
-            object.__delattr__(self, "_Proxy__args")
-            object.__delattr__(self, "_Proxy__kwargs")
+            object.__delattr__(self, '_Proxy__local')
+            object.__delattr__(self, '_Proxy__args')
+            object.__delattr__(self, '_Proxy__kwargs')
 
 
 def maybe_evaluate(obj):

+ 57 - 57
celery/platforms.py

@@ -25,23 +25,23 @@ from .local import try_import
 from billiard import current_process
 from kombu.utils.limits import TokenBucket
 
-_setproctitle = try_import("setproctitle")
-resource = try_import("resource")
-pwd = try_import("pwd")
-grp = try_import("grp")
+_setproctitle = try_import('setproctitle')
+resource = try_import('resource')
+pwd = try_import('pwd')
+grp = try_import('grp')
 
-EX_OK = getattr(os, "EX_OK", 0)
+EX_OK = getattr(os, 'EX_OK', 0)
 EX_FAILURE = 1
-EX_UNAVAILABLE = getattr(os, "EX_UNAVAILABLE", 69)
-EX_USAGE = getattr(os, "EX_USAGE", 64)
+EX_UNAVAILABLE = getattr(os, 'EX_UNAVAILABLE', 69)
+EX_USAGE = getattr(os, 'EX_USAGE', 64)
 
 SYSTEM = _platform.system()
-IS_OSX = SYSTEM == "Darwin"
-IS_WINDOWS = SYSTEM == "Windows"
+IS_OSX = SYSTEM == 'Darwin'
+IS_WINDOWS = SYSTEM == 'Windows'
 
 DAEMON_UMASK = 0
-DAEMON_WORKDIR = "/"
-DAEMON_REDIRECT_TO = getattr(os, "devnull", "/dev/null")
+DAEMON_WORKDIR = '/'
+DAEMON_REDIRECT_TO = getattr(os, 'devnull', '/dev/null')
 
 
 PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY
@@ -54,17 +54,17 @@ Seems we're already running? (PID: %s)"""
 
 
 def pyimplementation():
-    if hasattr(_platform, "python_implementation"):
+    if hasattr(_platform, 'python_implementation'):
         return _platform.python_implementation()
-    elif sys.platform.startswith("java"):
-        return "Jython " + sys.platform
-    elif hasattr(sys, "pypy_version_info"):
-        v = ".".join(map(str, sys.pypy_version_info[:3]))
+    elif sys.platform.startswith('java'):
+        return 'Jython ' + sys.platform
+    elif hasattr(sys, 'pypy_version_info'):
+        v = '.'.join(map(str, sys.pypy_version_info[:3]))
         if sys.pypy_version_info[3:]:
-            v += "-" + "".join(map(str, sys.pypy_version_info[3:]))
-        return "PyPy " + v
+            v += '-' + ''.join(map(str, sys.pypy_version_info[3:]))
+        return 'PyPy ' + v
     else:
-        return "CPython"
+        return 'CPython'
 
 
 class LockFailed(Exception):
@@ -123,7 +123,7 @@ class PIDFile(object):
     def read_pid(self):
         """Reads and returns the current pid."""
         try:
-            fh = open(self.path, "r")
+            fh = open(self.path, 'r')
         except IOError, exc:
             if exc.errno == errno.ENOENT:
                 return
@@ -133,14 +133,14 @@ class PIDFile(object):
             line = fh.readline()
             if line.strip() == line:  # must contain '\n'
                 raise ValueError(
-                    "Partially written or invalid pidfile %r" % (self.path))
+                    'Partially written or invalid pidfile %r' % (self.path))
         finally:
             fh.close()
 
         try:
             return int(line.strip())
         except ValueError:
-            raise ValueError("PID file %r contents invalid." % self.path)
+            raise ValueError('PID file %r contents invalid.' % self.path)
 
     def remove(self):
         """Removes the lock."""
@@ -157,7 +157,7 @@ class PIDFile(object):
         try:
             pid = self.read_pid()
         except ValueError, exc:
-            sys.stderr.write("Broken pidfile found. Removing it.\n")
+            sys.stderr.write('Broken pidfile found. Removing it.\n')
             self.remove()
             return True
         if not pid:
@@ -168,17 +168,17 @@ class PIDFile(object):
             os.kill(pid, 0)
         except os.error, exc:
             if exc.errno == errno.ESRCH:
-                sys.stderr.write("Stale pidfile exists. Removing it.\n")
+                sys.stderr.write('Stale pidfile exists. Removing it.\n')
                 self.remove()
                 return True
         return False
 
     def write_pid(self):
         pid = os.getpid()
-        content = "%d\n" % (pid, )
+        content = '%d\n' % (pid, )
 
         pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE)
-        pidfile = os.fdopen(pidfile_fd, "w")
+        pidfile = os.fdopen(pidfile_fd, 'w')
         try:
             pidfile.write(content)
             # flush and sync so that the re-read below works.
@@ -215,7 +215,7 @@ def create_pidlock(pidfile):
 
     .. code-block:: python
 
-        pidlock = create_pidlock("/var/run/app.pid")
+        pidlock = create_pidlock('/var/run/app.pid')
 
     """
     pidlock = PIDFile(pidfile)
@@ -296,31 +296,31 @@ def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
         import atexit
         from celery.platforms import detached, create_pidlock
 
-        with detached(logfile="/var/log/app.log", pidfile="/var/run/app.pid",
-                      uid="nobody"):
+        with detached(logfile='/var/log/app.log', pidfile='/var/run/app.pid',
+                      uid='nobody'):
             # Now in detached child process with effective user set to nobody,
             # and we know that our logfile can be written to, and that
             # the pidfile is not locked.
-            pidlock = create_pidlock("/var/run/app.pid").acquire()
+            pidlock = create_pidlock('/var/run/app.pid').acquire()
             atexit.register(pidlock.release)
 
             # Run the program
-            program.run(logfile="/var/log/app.log")
+            program.run(logfile='/var/log/app.log')
 
     """
 
     if not resource:
-        raise RuntimeError("This platform does not support detach.")
+        raise RuntimeError('This platform does not support detach.')
     workdir = os.getcwd() if workdir is None else workdir
 
-    signals.reset("SIGCLD")  # Make sure SIGCLD is using the default handler.
+    signals.reset('SIGCLD')  # Make sure SIGCLD is using the default handler.
     if not os.geteuid():
         # no point trying to setuid unless we're root.
         maybe_drop_privileges(uid=uid, gid=gid)
 
     # Since without stderr any errors will be silently suppressed,
     # we need to know that we have access to the logfile.
-    logfile and open(logfile, "a").close()
+    logfile and open(logfile, 'a').close()
     # Doesn't actually create the pidfile, but makes sure it's not stale.
     pidfile and create_pidlock(pidfile)
 
@@ -340,7 +340,7 @@ def parse_uid(uid):
         try:
             return pwd.getpwnam(uid).pw_uid
         except (AttributeError, KeyError):
-            raise KeyError("User does not exist: %r" % (uid, ))
+            raise KeyError('User does not exist: %r' % (uid, ))
 
 
 def parse_gid(gid):
@@ -356,7 +356,7 @@ def parse_gid(gid):
         try:
             return grp.getgrnam(gid).gr_gid
         except (AttributeError, KeyError):
-            raise KeyError("Group does not exist: %r" % (gid, ))
+            raise KeyError('Group does not exist: %r' % (gid, ))
 
 
 def _setgroups_hack(groups):
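
Both parsers above (``parse_uid`` and ``parse_gid``) accept either a numeric
id or a name: numeric strings pass straight through, while names are resolved
via the ``pwd``/``grp`` databases (the resolved ids are system dependent)::

    >>> from celery.platforms import parse_uid

    >>> parse_uid('0')
    0
    >>> parse_uid('nobody')  # value depends on the system
    65534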
@@ -381,7 +381,7 @@ def _setgroups_hack(groups):
 def setgroups(groups):
     max_groups = None
     try:
-        max_groups = os.sysconf("SC_NGROUPS_MAX")
+        max_groups = os.sysconf('SC_NGROUPS_MAX')
     except Exception:
         pass
     try:
@@ -398,7 +398,7 @@ def initgroups(uid, gid):
     if not pwd:  # pragma: no cover
         return
     username = pwd.getpwuid(uid)[0]
-    if hasattr(os, "initgroups"):  # Python 2.7+
+    if hasattr(os, 'initgroups'):  # Python 2.7+
         return os.initgroups(username, gid)
     groups = [gr.gr_gid for gr in grp.getgrall()
                             if username in gr.gr_mem]
@@ -468,23 +468,23 @@ class Signals(object):
 
         >>> from celery.platforms import signals
 
-        >>> signals["INT"] = my_handler
+        >>> signals['INT'] = my_handler
 
-        >>> signals["INT"]
+        >>> signals['INT']
         my_handler
 
-        >>> signals.supported("INT")
+        >>> signals.supported('INT')
         True
 
-        >>> signals.signum("INT")
+        >>> signals.signum('INT')
         2
 
-        >>> signals.ignore("USR1")
-        >>> signals["USR1"] == signals.ignored
+        >>> signals.ignore('USR1')
+        >>> signals['USR1'] == signals.ignored
         True
 
-        >>> signals.reset("USR1")
-        >>> signals["USR1"] == signals.default
+        >>> signals.reset('USR1')
+        >>> signals['USR1'] == signals.default
         True
 
         >>> signals.update(INT=exit_handler,
@@ -509,9 +509,9 @@ class Signals(object):
             return signal_name
         if not isinstance(signal_name, basestring) \
                 or not signal_name.isupper():
-            raise TypeError("signal name must be uppercase string.")
-        if not signal_name.startswith("SIG"):
-            signal_name = "SIG" + signal_name
+            raise TypeError('signal name must be uppercase string.')
+        if not signal_name.startswith('SIG'):
+            signal_name = 'SIG' + signal_name
         return getattr(_signal, signal_name)
 
     def reset(self, *signal_names):
@@ -561,10 +561,10 @@ ignore_signal = signals.ignore                # compat
 
 
 def strargv(argv):
-    arg_start = 2 if "manage" in argv[0] else 1
+    arg_start = 2 if 'manage' in argv[0] else 1
     if len(argv) > arg_start:
-        return " ".join(argv[arg_start:])
-    return ""
+        return ' '.join(argv[arg_start:])
+    return ''
 
 
 def set_process_title(progname, info=None):
@@ -573,14 +573,14 @@ def set_process_title(progname, info=None):
     Only works if :mod:`setproctitle` is installed.
 
     """
-    proctitle = "[%s]" % progname
-    proctitle = "%s %s" % (proctitle, info) if info else proctitle
+    proctitle = '[%s]' % progname
+    proctitle = '%s %s' % (proctitle, info) if info else proctitle
     if _setproctitle:
         _setproctitle.setproctitle(proctitle)
     return proctitle
 
 
-if os.environ.get("NOSETPS"):  # pragma: no cover
+if os.environ.get('NOSETPS'):  # pragma: no cover
 
     def set_mp_process_title(*a, **k):
         pass
@@ -595,9 +595,9 @@ else:
         """
         if not rate_limit or _setps_bucket.can_consume(1):
             if hostname:
-                progname = "%s@%s" % (progname, hostname.split(".")[0])
+                progname = '%s@%s' % (progname, hostname.split('.')[0])
             return set_process_title(
-                "%s:%s" % (progname, current_process().name), info=info)
+                '%s:%s' % (progname, current_process().name), info=info)
 
 
 def shellsplit(s, posix=True):

+ 7 - 7
celery/result.py

@@ -198,7 +198,7 @@ class AsyncResult(ResultBase):
         return hash(self.id)
 
     def __repr__(self):
-        return "<%s: %s>" % (self.__class__.__name__, self.id)
+        return '<%s: %s>' % (self.__class__.__name__, self.id)
 
     def __eq__(self, other):
         if isinstance(other, AsyncResult):
@@ -489,7 +489,7 @@ class ResultSet(ResultBase):
             if timeout:
                 remaining = timeout - (time.time() - time_start)
                 if remaining <= 0.0:
-                    raise TimeoutError("join operation timed out")
+                    raise TimeoutError('join operation timed out')
             results.append(result.get(timeout=remaining,
                                       propagate=propagate,
                                       interval=interval))
@@ -527,7 +527,7 @@ class ResultSet(ResultBase):
         acc = [None for _ in xrange(len(self))]
         for task_id, meta in self.iter_native(timeout=timeout,
                                               interval=interval):
-            acc[results.index(task_id)] = meta["result"]
+            acc[results.index(task_id)] = meta['result']
         return acc
 
     def __len__(self):
@@ -539,7 +539,7 @@ class ResultSet(ResultBase):
         return NotImplemented
 
     def __repr__(self):
-        return "<%s: %r>" % (self.__class__.__name__,
+        return '<%s: %r>' % (self.__class__.__name__,
                              [r.id for r in self.results])
 
     @property
@@ -603,7 +603,7 @@ class GroupResult(ResultSet):
         return NotImplemented
 
     def __repr__(self):
-        return "<%s: %s %r>" % (self.__class__.__name__, self.id,
+        return '<%s: %s %r>' % (self.__class__.__name__, self.id,
                                 [r.id for r in self.results])
 
     def serializable(self):
@@ -621,8 +621,8 @@ class TaskSetResult(GroupResult):
     def __init__(self, taskset_id, results=None, **kwargs):
         # XXX supports the taskset_id kwarg.
         # XXX previously the "results" arg was named "subtasks".
-        if "subtasks" in kwargs:
-            results = kwargs["subtasks"]
+        if 'subtasks' in kwargs:
+            results = kwargs['subtasks']
         GroupResult.__init__(self, taskset_id, results, **kwargs)
 
     def itersubtasks(self):

+ 36 - 36
celery/schedules.py

@@ -76,7 +76,7 @@ class schedule(object):
         return False, rem
 
     def __repr__(self):
-        return "<freq: %s>" % self.human_seconds
+        return '<freq: %s>' % self.human_seconds
 
     def __eq__(self, other):
         if isinstance(other, schedule):
@@ -110,21 +110,21 @@ class crontab_parser(object):
     The parser is a general purpose one, useful for parsing hours, minutes and
     day_of_week expressions.  Example usage::
 
-        >>> minutes = crontab_parser(60).parse("*/15")
+        >>> minutes = crontab_parser(60).parse('*/15')
         [0, 15, 30, 45]
-        >>> hours = crontab_parser(24).parse("*/4")
+        >>> hours = crontab_parser(24).parse('*/4')
         [0, 4, 8, 12, 16, 20]
-        >>> day_of_week = crontab_parser(7).parse("*")
+        >>> day_of_week = crontab_parser(7).parse('*')
         [0, 1, 2, 3, 4, 5, 6]
 
     It can also parse day_of_month and month_of_year expressions if initialized
     with a minimum of 1.  Example usage::
 
-        >>> days_of_month = crontab_parser(31, 1).parse("*/3")
+        >>> days_of_month = crontab_parser(31, 1).parse('*/3')
         [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31]
-        >>> months_of_year = crontab_parser(12, 1).parse("*/2")
+        >>> months_of_year = crontab_parser(12, 1).parse('*/2')
         [1, 3, 5, 7, 9, 11]
-        >>> months_of_year = crontab_parser(12, 1).parse("2-12/2")
+        >>> months_of_year = crontab_parser(12, 1).parse('2-12/2')
         [2, 4, 6, 8, 10, 12]
 
     The maximum possible expanded value returned is found by the formula::
@@ -151,7 +151,7 @@ class crontab_parser(object):
         acc = set()
         for part in spec.split(','):
             if not part:
-                raise self.ParseException("empty part")
+                raise self.ParseException('empty part')
             acc |= set(self._parse_part(part))
         return acc
 
@@ -171,12 +171,12 @@ class crontab_parser(object):
 
     def _range_steps(self, toks):
         if len(toks) != 3 or not toks[2]:
-            raise self.ParseException("empty filter")
+            raise self.ParseException('empty filter')
         return self._expand_range(toks[:2])[::int(toks[2])]
 
     def _star_steps(self, toks):
         if not toks or not toks[0]:
-            raise self.ParseException("empty filter")
+            raise self.ParseException('empty filter')
         return self._expand_star()[::int(toks[0])]
 
     def _expand_star(self, *args):
@@ -184,7 +184,7 @@ class crontab_parser(object):
 
     def _expand_number(self, s):
         if isinstance(s, basestring) and s[0] == '-':
-            raise self.ParseException("negative numbers not supported")
+            raise self.ParseException('negative numbers not supported')
         try:
             i = int(s)
         except ValueError:
@@ -194,7 +194,7 @@ class crontab_parser(object):
                 raise ValueError("Invalid weekday literal '%s'." % s)
 
         if i < self.min_:
-            raise ValueError("Invalid beginning range - %s < %s." %
+            raise ValueError('Invalid beginning range: %s < %s.' %
                                                    (i, self.min_))
         return i
 
@@ -216,16 +216,16 @@ class crontab(schedule):
         - A (list of) integers from 0-59 that represent the minutes of
           an hour of when execution should occur; or
         - A string representing a crontab pattern.  This may get pretty
-          advanced, like `minute="*/15"` (for every quarter) or
-          `minute="1,13,30-45,50-59/2"`.
+          advanced, like `minute='*/15'` (for every quarter) or
+          `minute='1,13,30-45,50-59/2'`.
 
     .. attribute:: hour
 
         - A (list of) integers from 0-23 that represent the hours of
           a day of when execution should occur; or
         - A string representing a crontab pattern.  This may get pretty
-          advanced, like `hour="*/3"` (for every three hours) or
-          `hour="0,8-17/2"` (at midnight, and every two hours during
+          advanced, like `hour='*/3'` (for every three hours) or
+          `hour='0,8-17/2'` (at midnight, and every two hours during
           office hours).
 
     .. attribute:: day_of_week
@@ -234,17 +234,17 @@ class crontab(schedule):
           6, that represent the days of a week that execution should
           occur.
         - A string representing a crontab pattern.  This may get pretty
-          advanced, like `day_of_week="mon-fri"` (for weekdays only).
-          (Beware that `day_of_week="*/2"` does not literally mean
-          "every two days", but "every day that is divisible by two"!)
+          advanced, like `day_of_week='mon-fri'` (for weekdays only).
+          (Beware that `day_of_week='*/2'` does not literally mean
+          'every two days', but 'every day that is divisible by two'!)
 
     .. attribute:: day_of_month
 
         - A (list of) integers from 1-31 that represents the days of the
           month that execution should occur.
         - A string representing a crontab pattern.  This may get pretty
-          advanced, such as `day_of_month="2-30/3"` (for every even
-          numbered day) or `day_of_month="1-7,15-21"` (for the first and
+          advanced, such as `day_of_month='2-30/3'` (for every even
+          numbered day) or `day_of_month='1-7,15-21'` (for the first and
           third weeks of the month).
 
     .. attribute:: month_of_year
@@ -252,8 +252,8 @@ class crontab(schedule):
         - A (list of) integers from 1-12 that represents the months of
           the year during which execution can occur.
         - A string representing a crontab pattern.  This may get pretty
-          advanced, such as `month_of_year="*/3"` (for the first month
-          of every quarter) or `month_of_year="2-12/2"` (for every even
+          advanced, such as `month_of_year='*/3'` (for the first month
+          of every quarter) or `month_of_year='2-12/2'` (for every even
           numbered month).
 
     It is important to realize that any day on which execution should
@@ -261,7 +261,7 @@ class crontab(schedule):
     month attributes.  For example, if `day_of_week` is 0 and `day_of_month`
     is every seventh day, only months that begin on Sunday and are also
     in the `month_of_year` attribute will have execution events.  Or,
-    `day_of_week` is 1 and `day_of_month` is "1-7,15-21" means every
+    `day_of_week` is 1 and `day_of_month` is '1-7,15-21' means every
     first and third Monday of every month present in `month_of_year`.
 
     """
@@ -301,16 +301,16 @@ class crontab(schedule):
             result = set(cronspec)
         else:
             raise TypeError(
-                    "Argument cronspec needs to be of any of the "
-                    "following types: int, basestring, or an iterable type. "
+                    'Argument cronspec needs to be of any of the '
+                    'following types: int, basestring, or an iterable type. '
                     "'%s' was given." % type(cronspec))
 
         # ensure the result does not precede the min or exceed the max
         for number in result:
             if number >= max_ + min_ or number < min_:
                 raise ValueError(
-                        "Invalid crontab pattern. Valid "
-                        "range is %d-%d. '%d' was found." %
+                        'Invalid crontab pattern. Valid '
+                        "range is %d-%d. '%d' was found." %
                         (min_, max_ - 1 + min_, number))
 
         return result
@@ -389,12 +389,12 @@ class crontab(schedule):
         self.nowfun = nowfun or current_app.now
 
     def __repr__(self):
-        return ("<crontab: %s %s %s %s %s (m/h/d/dM/MY)>" %
-                                            (self._orig_minute or "*",
-                                             self._orig_hour or "*",
-                                             self._orig_day_of_week or "*",
-                                             self._orig_day_of_month or "*",
-                                             self._orig_month_of_year or "*"))
+        return ('<crontab: %s %s %s %s %s (m/h/d/dM/MY)>' %
+                                            (self._orig_minute or '*',
+                                             self._orig_hour or '*',
+                                             self._orig_day_of_week or '*',
+                                             self._orig_day_of_month or '*',
+                                             self._orig_month_of_year or '*'))
 
     def __reduce__(self):
         return (self.__class__, (self._orig_minute,
@@ -435,8 +435,8 @@ class crontab(schedule):
                                       microsecond=0)
             else:
                 next_hour = min(self.hour)
-                all_dom_moy = (self._orig_day_of_month == "*" and
-                                  self._orig_month_of_year == "*")
+                all_dom_moy = (self._orig_day_of_month == '*' and
+                                  self._orig_month_of_year == '*')
                 if all_dom_moy:
                     next_day = min([day for day in self.day_of_week
                                         if day > dow_num] or

+ 2 - 2
celery/security/__init__.py

@@ -40,7 +40,7 @@ def disable_untrusted_serializers(whitelist=None):
 
 
 def setup_security(allowed_serializers=None, key=None, cert=None, store=None,
-        digest="sha1", serializer="json"):
+        digest='sha1', serializer='json'):
     """Setup the message-signing serializer.
 
     Disables untrusted serializers and if configured to use the ``auth``
@@ -67,7 +67,7 @@ def setup_security(allowed_serializers=None, key=None, cert=None, store=None,
     disable_untrusted_serializers(allowed_serializers)
 
     conf = current_app.conf
-    if conf.CELERY_TASK_SERIALIZER != "auth":
+    if conf.CELERY_TASK_SERIALIZER != 'auth':
         return
 
     try:

+ 6 - 6
celery/security/certificate.py

@@ -22,7 +22,7 @@ class Certificate(object):
 
     def __init__(self, cert):
         assert crypto is not None
-        with reraise_errors("Invalid certificate: %r"):
+        with reraise_errors('Invalid certificate: %r'):
             self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
 
     def has_expired(self):
@@ -40,11 +40,11 @@ class Certificate(object):
 
     def get_id(self):
         """Serial number/issuer pair uniquely identifies a certificate"""
-        return "%s %s" % (self.get_issuer(), self.get_serial_number())
+        return '%s %s' % (self.get_issuer(), self.get_serial_number())
 
     def verify(self, data, signature, digest):
         """Verifies the signature for string containing data."""
-        with reraise_errors("Bad signature: %r"):
+        with reraise_errors('Bad signature: %r'):
             crypto.verify(self._cert, signature, data, digest)
 
 
@@ -64,11 +64,11 @@ class CertStore(object):
         try:
             return self._certs[id]
         except KeyError:
-            raise SecurityError("Unknown certificate: %r" % (id, ))
+            raise SecurityError('Unknown certificate: %r' % (id, ))
 
     def add_cert(self, cert):
         if cert.get_id() in self._certs:
-            raise SecurityError("Duplicate certificate: %r" % (id, ))
+            raise SecurityError('Duplicate certificate: %r' % (id, ))
         self._certs[cert.get_id()] = cert
 
 
@@ -84,5 +84,5 @@ class FSCertStore(CertStore):
                 cert = Certificate(f.read())
                 if cert.has_expired():
                     raise SecurityError(
-                        "Expired certificate: %r" % (cert.get_id(), ))
+                        'Expired certificate: %r' % (cert.get_id(), ))
                 self.add_cert(cert)

+ 2 - 2
celery/security/key.py

@@ -15,10 +15,10 @@ from .utils import crypto, reraise_errors
 class PrivateKey(object):
 
     def __init__(self, key):
-        with reraise_errors("Invalid private key: %r"):
+        with reraise_errors('Invalid private key: %r'):
             self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
 
     def sign(self, data, digest):
         """sign string containing data."""
-        with reraise_errors("Unable to sign data: %r"):
+        with reraise_errors('Unable to sign data: %r'):
             return crypto.sign(self._key, data, digest)

+ 15 - 15
celery/security/serialization.py

@@ -30,7 +30,7 @@ def b64decode(s):
 class SecureSerializer(object):
 
     def __init__(self, key=None, cert=None, cert_store=None,
-            digest="sha1", serializer="json"):
+            digest='sha1', serializer='json'):
         self._key = key
         self._cert = cert
         self._cert_store = cert_store
@@ -41,7 +41,7 @@ class SecureSerializer(object):
         """serialize data structure into string"""
         assert self._key is not None
         assert self._cert is not None
-        with reraise_errors("Unable to serialize: %r", (Exception, )):
+        with reraise_errors('Unable to serialize: %r', (Exception, )):
             content_type, content_encoding, body = encode(
                     data, serializer=self._serializer)
             # What we sign is the serialized body, not the body itself.
@@ -55,14 +55,14 @@ class SecureSerializer(object):
     def deserialize(self, data):
         """deserialize data structure from string"""
         assert self._cert_store is not None
-        with reraise_errors("Unable to deserialize: %r", (Exception, )):
+        with reraise_errors('Unable to deserialize: %r', (Exception, )):
             payload = self._unpack(data)
-            signature, signer, body = (payload["signature"],
-                                       payload["signer"],
-                                       payload["body"])
+            signature, signer, body = (payload['signature'],
+                                       payload['signer'],
+                                       payload['body'])
             self._cert_store[signer].verify(body, signature, self._digest)
-        return decode(body, payload["content_type"],
-                            payload["content_encoding"], force=True)
+        return decode(body, payload['content_type'],
+                            payload['content_encoding'], force=True)
 
     def _pack(self, body, content_type, content_encoding, signer, signature,
             sep='\x00\x01'):
@@ -70,18 +70,18 @@ class SecureSerializer(object):
                                    content_type, content_encoding, body]))
 
     def _unpack(self, payload, sep='\x00\x01',
-            fields=("signer", "signature", "content_type",
-                    "content_encoding", "body")):
+            fields=('signer', 'signature', 'content_type',
+                    'content_encoding', 'body')):
         return dict(zip(fields, b64decode(payload).split(sep)))
 
 
-def register_auth(key=None, cert=None, store=None, digest="sha1",
-        serializer="json"):
+def register_auth(key=None, cert=None, store=None, digest='sha1',
+        serializer='json'):
     """register security serializer"""
     s = SecureSerializer(key and PrivateKey(key),
                          cert and Certificate(cert),
                          store and FSCertStore(store),
                          digest=digest, serializer=serializer)
-    registry.register("auth", s.serialize, s.deserialize,
-                      content_type="application/data",
-                      content_encoding="utf-8")
+    registry.register('auth', s.serialize, s.deserialize,
+                      content_type='application/data',
+                      content_encoding='utf-8')
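
End to end, the serializer above is enabled by pointing the app at a private
key, a certificate and a certificate store, with
``CELERY_TASK_SERIALIZER = 'auth'`` in the configuration; a hedged sketch
(the paths are hypothetical)::

    from celery.security import setup_security

    setup_security(key='/etc/ssl/private/worker.key',
                   cert='/etc/ssl/certs/worker.pem',
                   store='/etc/ssl/certs')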

+ 1 - 1
celery/security/utils.py

@@ -21,7 +21,7 @@ except ImportError:  # pragma: no cover
 
 
 @contextmanager
-def reraise_errors(msg="%r", errors=None):
+def reraise_errors(msg='%r', errors=None):
     assert crypto is not None
     errors = (crypto.Error, ) if errors is None else errors
     try:

+ 16 - 26
celery/signals.py

@@ -13,40 +13,30 @@
 
 """
 from __future__ import absolute_import
-
 from .utils.dispatch import Signal
 
-task_sent = Signal(providing_args=["task_id", "task",
-                                   "args", "kwargs",
-                                   "eta", "taskset"])
-task_prerun = Signal(providing_args=["task_id", "task",
-                                     "args", "kwargs"])
-task_postrun = Signal(providing_args=["task_id", "task",
-                                      "args", "kwargs", "retval"])
-task_success = Signal(providing_args=["result"])
-task_failure = Signal(providing_args=["task_id", "exception",
-                                      "args", "kwargs", "traceback",
-                                      "einfo"])
-
-celeryd_init = Signal(providing_args=["instance"])
-
+task_sent = Signal(providing_args=[
+    'task_id', 'task', 'args', 'kwargs', 'eta', 'taskset'])
+task_prerun = Signal(providing_args=['task_id', 'task', 'args', 'kwargs'])
+task_postrun = Signal(providing_args=[
+    'task_id', 'task', 'args', 'kwargs', 'retval'])
+task_success = Signal(providing_args=['result'])
+task_failure = Signal(providing_args=[
+    'task_id', 'exception', 'args', 'kwargs', 'traceback', 'einfo'])
+celeryd_init = Signal(providing_args=['instance'])
 worker_init = Signal(providing_args=[])
 worker_process_init = Signal(providing_args=[])
 worker_ready = Signal(providing_args=[])
 worker_shutdown = Signal(providing_args=[])
-
-setup_logging = Signal(providing_args=["loglevel", "logfile",
-                                       "format", "colorize"])
-after_setup_logger = Signal(providing_args=["logger", "loglevel", "logfile",
-                                            "format", "colorize"])
-after_setup_task_logger = Signal(providing_args=["logger", "loglevel",
-                                                 "logfile", "format",
-                                                 "colorize"])
-
+setup_logging = Signal(providing_args=[
+    'loglevel', 'logfile', 'format', 'colorize'])
+after_setup_logger = Signal(providing_args=[
+    'logger', 'loglevel', 'logfile', 'format', 'colorize'])
+after_setup_task_logger = Signal(providing_args=[
+    'logger', 'loglevel', 'logfile', 'format', 'colorize'])
 beat_init = Signal(providing_args=[])
 beat_embedded_init = Signal(providing_args=[])
-
 eventlet_pool_started = Signal(providing_args=[])
 eventlet_pool_preshutdown = Signal(providing_args=[])
 eventlet_pool_postshutdown = Signal(providing_args=[])
-eventlet_pool_apply = Signal(providing_args=["target", "args", "kwargs"])
+eventlet_pool_apply = Signal(providing_args=['target', 'args', 'kwargs'])
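
Connecting a receiver works as with any dispatch signal; a minimal sketch
(the handler body is illustrative, and extra keyword arguments such as
``signal`` and ``sender`` are collected by ``**extra``)::

    from celery.signals import task_prerun

    def log_prerun(task_id=None, task=None, args=None, kwargs=None, **extra):
        print('about to run %s[%s]' % (task, task_id))

    task_prerun.connect(log_prerun)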

+ 14 - 14
celery/states.py

@@ -60,14 +60,14 @@ from __future__ import absolute_import
 #: State precedence.
 #: None represents the precedence of an unknown state.
 #: Lower index means higher precedence.
-PRECEDENCE = ["SUCCESS",
-              "FAILURE",
+PRECEDENCE = ['SUCCESS',
+              'FAILURE',
               None,
-              "REVOKED",
-              "STARTED",
-              "RECEIVED",
-              "RETRY",
-              "PENDING"]
+              'REVOKED',
+              'STARTED',
+              'RECEIVED',
+              'RETRY',
+              'PENDING']
 
 
 def precedence(state):
@@ -101,13 +101,13 @@ class state(str):
     def __le__(self, other):
         return self.compare(other, lambda a, b: a >= b, False)
 
-PENDING = "PENDING"
-RECEIVED = "RECEIVED"
-STARTED = "STARTED"
-SUCCESS = "SUCCESS"
-FAILURE = "FAILURE"
-REVOKED = "REVOKED"
-RETRY = "RETRY"
+PENDING = 'PENDING'
+RECEIVED = 'RECEIVED'
+STARTED = 'STARTED'
+SUCCESS = 'SUCCESS'
+FAILURE = 'FAILURE'
+REVOKED = 'REVOKED'
+RETRY = 'RETRY'
 
 READY_STATES = frozenset([SUCCESS, FAILURE, REVOKED])
 UNREADY_STATES = frozenset([PENDING, RECEIVED, STARTED, RETRY])
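
Because a lower index means higher precedence, ``state`` instances compare
accordingly; an illustrative doctest::

    >>> from celery.states import state, precedence, PENDING, SUCCESS

    >>> precedence(SUCCESS), precedence(PENDING)
    (0, 7)
    >>> state(PENDING) < state(SUCCESS)  # SUCCESS takes precedence
    True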

+ 6 - 6
celery/task/__init__.py

@@ -21,19 +21,19 @@ class module(MagicModule):
 
 old_module, new_module = recreate_module(__name__,  # pragma: no cover
     by_module={
-        "celery.task.base":   ["BaseTask", "Task", "PeriodicTask",
-                               "task", "periodic_task"],
-        "celery.canvas":      ["chain", "group", "chord", "subtask"],
-        "celery.task.sets":   ["TaskSet"],
+        'celery.task.base':   ['BaseTask', 'Task', 'PeriodicTask',
+                               'task', 'periodic_task'],
+        'celery.canvas':      ['chain', 'group', 'chord', 'subtask'],
+        'celery.task.sets':   ['TaskSet'],
     },
     base=module,
-    __package__="celery.task",
+    __package__='celery.task',
     __file__=__file__,
     __path__=__path__,
     __doc__=__doc__,
     current=current,
     discard_all=Proxy(lambda: current_app.control.purge),
     backend_cleanup=Proxy(
-        lambda: current_app.tasks["celery.backend_cleanup"]
+        lambda: current_app.tasks['celery.backend_cleanup']
     ),
 )

+ 22 - 22
celery/task/base.py

@@ -20,8 +20,8 @@ from celery.schedules import maybe_schedule
 
 #: list of methods that must be classmethods in the old API.
 _COMPAT_CLASSMETHODS = (
-    "delay", "apply_async", "retry", "apply",
-    "AsyncResult", "subtask", "push_request", "pop_request")
+    'delay', 'apply_async', 'retry', 'apply',
+    'AsyncResult', 'subtask', 'push_request', 'pop_request')
 
 
 class Task(BaseTask):
@@ -43,14 +43,14 @@ class Task(BaseTask):
     mandatory = False
     immediate = False
     priority = None
-    type = "regular"
+    type = 'regular'
     error_whitelist = ()
     disable_error_emails = False
 
     from_config = BaseTask.from_config + (
-        ("exchange_type", "CELERY_DEFAULT_EXCHANGE_TYPE"),
-        ("delivery_mode", "CELERY_DEFAULT_DELIVERY_MODE"),
-        ("error_whitelist", "CELERY_TASK_ERROR_WHITELIST"),
+        ('exchange_type', 'CELERY_DEFAULT_EXCHANGE_TYPE'),
+        ('delivery_mode', 'CELERY_DEFAULT_DELIVERY_MODE'),
+        ('error_whitelist', 'CELERY_TASK_ERROR_WHITELIST'),
     )
 
     # In old Celery the @task decorator didn't exist, so one would create
@@ -135,21 +135,21 @@ class PeriodicTask(Task):
     compat = True
 
     def __init__(self):
-        if not hasattr(self, "run_every"):
+        if not hasattr(self, 'run_every'):
             raise NotImplementedError(
-                    "Periodic tasks must have a run_every attribute")
+                    'Periodic tasks must have a run_every attribute')
         self.run_every = maybe_schedule(self.run_every, self.relative)
         super(PeriodicTask, self).__init__()
 
     @classmethod
     def on_bound(cls, app):
         app.conf.CELERYBEAT_SCHEDULE[cls.name] = {
-                "task": cls.name,
-                "schedule": cls.run_every,
-                "args": (),
-                "kwargs": {},
-                "options": cls.options or {},
-                "relative": cls.relative,
+                'task': cls.name,
+                'schedule': cls.run_every,
+                'args': (),
+                'kwargs': {},
+                'options': cls.options or {},
+                'relative': cls.relative,
         }
 
 
@@ -177,13 +177,13 @@ def task(*args, **kwargs):
 
     Calling the resulting task:
 
-            >>> refresh_feed("http://example.com/rss") # Regular
+            >>> refresh_feed('http://example.com/rss') # Regular
             <Feed: http://example.com/rss>
-            >>> refresh_feed.delay("http://example.com/rss") # Async
+            >>> refresh_feed.delay('http://example.com/rss') # Async
             <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
     """
-    return current_app.task(*args, **dict({"accept_magic_kwargs": False,
-                                           "base": Task}, **kwargs))
+    return current_app.task(*args, **dict({'accept_magic_kwargs': False,
+                                           'base': Task}, **kwargs))
 
 
 def periodic_task(*args, **options):
@@ -203,7 +203,7 @@ def periodic_task(*args, **options):
 
                 from celery.task import current
 
-                @task(exchange="feeds")
+                @task(exchange='feeds')
                 def refresh_feed(url):
                     try:
                         return Feed.objects.get(url=url).refresh()
@@ -212,10 +212,10 @@ def periodic_task(*args, **options):
 
             Calling the resulting task:
 
-                >>> refresh_feed("http://example.com/rss") # Regular
+                >>> refresh_feed('http://example.com/rss') # Regular
                 <Feed: http://example.com/rss>
-                >>> refresh_feed.delay("http://example.com/rss") # Async
+                >>> refresh_feed.delay('http://example.com/rss') # Async
                 <AsyncResult: 8998d0f4-da0b-4669-ba03-d5ab5ac6ad5d>
 
     """
-    return task(**dict({"base": PeriodicTask}, **options))
+    return task(**dict({'base': PeriodicTask}, **options))

+ 23 - 23
celery/task/http.py

@@ -22,7 +22,7 @@ except ImportError:  # pragma: no cover
 from celery import __version__ as celery_version
 from .base import Task as BaseTask
 
-GET_METHODS = frozenset(["GET", "HEAD"])
+GET_METHODS = frozenset(['GET', 'HEAD'])
 
 
 class InvalidResponseError(Exception):
@@ -40,7 +40,7 @@ class UnknownStatusError(InvalidResponseError):
 def maybe_utf8(value):
     """Encode to utf-8, only if the value is Unicode."""
     if isinstance(value, unicode):
-        return value.encode("utf-8")
+        return value.encode('utf-8')
     return value
 
 
@@ -55,25 +55,25 @@ else:
     def utf8dict(tup):  # noqa
         """With a dict's items() tuple return a new dict with any utf-8
         keys/values encoded."""
-        return dict((key.encode("utf-8"), maybe_utf8(value))
+        return dict((key.encode('utf-8'), maybe_utf8(value))
                         for key, value in tup)
 
 
 def extract_response(raw_response, loads=anyjson.loads):
     """Extract the response text from a raw JSON response."""
     if not raw_response:
-        raise InvalidResponseError("Empty response")
+        raise InvalidResponseError('Empty response')
     try:
         payload = loads(raw_response)
     except ValueError, exc:
         raise InvalidResponseError, InvalidResponseError(
                 str(exc)), sys.exc_info()[2]
 
-    status = payload["status"]
-    if status == "success":
-        return payload["retval"]
-    elif status == "failure":
-        raise RemoteExecuteError(payload.get("reason"))
+    status = payload['status']
+    if status == 'success':
+        return payload['retval']
+    elif status == 'failure':
+        raise RemoteExecuteError(payload.get('reason'))
     else:
         raise UnknownStatusError(str(status))
 
@@ -87,13 +87,13 @@ class MutableURL(object):
 
     Examples
 
-        >>> url = URL("http://www.google.com:6580/foo/bar?x=3&y=4#foo")
+        >>> url = URL('http://www.google.com:6580/foo/bar?x=3&y=4#foo')
         >>> url.query
         {'x': '3', 'y': '4'}
         >>> str(url)
         'http://www.google.com:6580/foo/bar?y=4&x=3#foo'
-        >>> url.query["x"] = 10
-        >>> url.query.update({"George": "Costanza"})
+        >>> url.query['x'] = 10
+        >>> url.query.update({'George': 'Costanza'})
         >>> str(url)
         'http://www.google.com:6580/foo/bar?y=4&x=10&George=Costanza#foo'
 
@@ -105,14 +105,14 @@ class MutableURL(object):
     def __str__(self):
         scheme, netloc, path, params, query, fragment = self.parts
         query = urlencode(utf8dict(self.query.items()))
-        components = [scheme + "://", netloc, path or "/",
-                      ";%s" % params   if params   else "",
-                      "?%s" % query    if query    else "",
-                      "#%s" % fragment if fragment else ""]
-        return "".join(filter(None, components))
+        components = [scheme + '://', netloc, path or '/',
+                      ';%s' % params   if params   else '',
+                      '?%s' % query    if query    else '',
+                      '#%s' % fragment if fragment else '']
+        return ''.join(filter(None, components))
 
     def __repr__(self):
-        return "<%s: %s>" % (self.__class__.__name__, str(self))
+        return '<%s: %s>' % (self.__class__.__name__, str(self))
 
 
 class HttpDispatch(object):
@@ -125,7 +125,7 @@ class HttpDispatch(object):
     :param logger: Logger used for user/system feedback.
 
     """
-    user_agent = "celery/%s" % celery_version
+    user_agent = 'celery/%s' % celery_version
     timeout = 5
 
     def __init__(self, url, method, task_kwargs, logger=None):
@@ -155,7 +155,7 @@ class HttpDispatch(object):
 
     @property
     def http_headers(self):
-        headers = {"User-Agent": self.user_agent}
+        headers = {'User-Agent': self.user_agent}
         return headers
 
 
@@ -185,7 +185,7 @@ class HttpDispatchTask(BaseTask):
     method = None
     accept_magic_kwargs = False
 
-    def run(self, url=None, method="GET", **kwargs):
+    def run(self, url=None, method='GET', **kwargs):
         url = url or self.url
         method = method or self.method
         return HttpDispatch(url, method, kwargs, self.logger).dispatch()
@@ -208,7 +208,7 @@ class URL(MutableURL):
         self.dispatcher = dispatcher or self.dispatcher
 
     def get_async(self, **kwargs):
-        return self.dispatcher.delay(str(self), "GET", **kwargs)
+        return self.dispatcher.delay(str(self), 'GET', **kwargs)
 
     def post_async(self, **kwargs):
-        return self.dispatcher.delay(str(self), "POST", **kwargs)
+        return self.dispatcher.delay(str(self), 'POST', **kwargs)
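
A note on the protocol these helpers implement: the remote endpoint is
expected to answer with a JSON document of the form
``{'status': 'success', 'retval': 100}`` or
``{'status': 'failure', 'reason': 'error message'}``, which is exactly what
`extract_response` above unpacks. A minimal usage sketch (the URL is a
placeholder)::

    from celery.task.http import URL

    result = URL('http://example.com/multiply').get_async(x=10, y=10)
    result.get()   # the value of the 'retval' field, once the task runs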

+ 1 - 1
celery/task/sets.py

@@ -24,7 +24,7 @@ class TaskSet(list):
 
     Example::
 
-        >>> urls = ("http://cnn.com/rss", "http://bbc.co.uk/rss")
+        >>> urls = ('http://cnn.com/rss', 'http://bbc.co.uk/rss')
         >>> s = TaskSet(refresh_feed.s(url) for url in urls)
         >>> taskset_result = s.apply_async()
         >>> list_of_return_values = taskset_result.join()  # *expensive*

+ 10 - 10
celery/task/trace.py

@@ -75,7 +75,7 @@ def task_has_custom(task, attr):
 
 
 class TraceInfo(object):
-    __slots__ = ("state", "retval")
+    __slots__ = ('state', 'retval')
 
     def __init__(self, state, retval=None):
         self.state = state
@@ -102,7 +102,7 @@ class TraceInfo(object):
         try:
             exc = self.retval
             message, orig_exc = exc.args
-            expanded_msg = "%s: %s" % (message, str(orig_exc))
+            expanded_msg = '%s: %s' % (message, str(orig_exc))
             einfo = ExceptionInfo((type_, type_(expanded_msg, None), tb))
             if store_errors:
                 task.backend.mark_as_retry(req.id, orig_exc, einfo.traceback)
@@ -136,7 +136,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
     # If the task doesn't define a custom __call__ method
     # we optimize it away by simply calling the run method directly,
     # saving the extra method call and a line less in the stack trace.
-    fun = task if task_has_custom(task, "__call__") else task.run
+    fun = task if task_has_custom(task, '__call__') else task.run
 
     loader = loader or current_app.loader
     backend = task.backend
@@ -151,9 +151,9 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
 
     task_on_success = None
     task_after_return = None
-    if task_has_custom(task, "on_success"):
+    if task_has_custom(task, 'on_success'):
         task_on_success = task.on_success
-    if task_has_custom(task, "after_return"):
+    if task_has_custom(task, 'after_return'):
         task_after_return = task.after_return
 
     store_result = backend.store_result
@@ -186,8 +186,8 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                                 args=args, kwargs=kwargs)
                 loader_task_init(uuid, task)
                 if track_started:
-                    store_result(uuid, {"pid": pid,
-                                        "hostname": hostname}, STARTED)
+                    store_result(uuid, {'pid': pid,
+                                        'hostname': hostname}, STARTED)
 
                 # -*- TRACE -*-
                 try:
@@ -248,7 +248,7 @@ def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
                     except (KeyboardInterrupt, SystemExit, MemoryError):
                         raise
                     except Exception, exc:
-                        _logger.error("Process cleanup failed: %r", exc,
+                        _logger.error('Process cleanup failed: %r', exc,
                                       exc_info=True)
         except Exception, exc:
             if eager:
@@ -273,7 +273,7 @@ def trace_task_ret(task, uuid, args, kwargs, request={}):
 
 
 def eager_trace_task(task, uuid, args, kwargs, request=None, **opts):
-    opts.setdefault("eager", True)
+    opts.setdefault('eager', True)
     return build_tracer(task.name, task, **opts)(
             uuid, args, kwargs, request)
 
@@ -284,7 +284,7 @@ def report_internal_error(task, exc):
         _value = task.backend.prepare_exception(exc)
         exc_info = ExceptionInfo((_type, _value, _tb), internal=True)
         warn(RuntimeWarning(
-            "Exception raised outside body: %r:\n%s" % (
+            'Exception raised outside body: %r:\n%s' % (
                 exc, exc_info.traceback)))
         return exc_info
     finally:

+ 18 - 18
celery/utils/__init__.py

@@ -38,9 +38,9 @@ DEPRECATION_FMT = """
 
 def warn_deprecated(description=None, deprecation=None, removal=None,
         alternative=None):
-    ctx = {"description": description,
-           "deprecation": deprecation, "removal": removal,
-           "alternative": alternative}
+    ctx = {'description': description,
+           'deprecation': deprecation, 'removal': removal,
+           'alternative': alternative}
     if deprecation is not None:
         w = CPendingDeprecationWarning(PENDING_DEPRECATION_FMT % ctx)
     else:
@@ -96,15 +96,15 @@ def fun_takes_kwargs(fun, kwlist=[]):
 
         >>> def foo(self, x, y, logfile=None, loglevel=None):
         ...     return x * y
-        >>> fun_takes_kwargs(foo, ["logfile", "loglevel", "task_id"])
-        ["logfile", "loglevel"]
+        >>> fun_takes_kwargs(foo, ['logfile', 'loglevel', 'task_id'])
+        ['logfile', 'loglevel']
 
         >>> def foo(self, x, y, **kwargs):
-        >>> fun_takes_kwargs(foo, ["logfile", "loglevel", "task_id"])
-        ["logfile", "loglevel", "task_id"]
+        >>> fun_takes_kwargs(foo, ['logfile', 'loglevel', 'task_id'])
+        ['logfile', 'loglevel', 'task_id']
 
     """
-    argspec = getattr(fun, "argspec", getargspec(fun))
+    argspec = getattr(fun, 'argspec', getargspec(fun))
     args, _varargs, keywords, _defaults = argspec
     if keywords != None:
         return kwlist
@@ -114,7 +114,7 @@ def fun_takes_kwargs(fun, kwlist=[]):
 def isatty(fh):
     # Fixes bug with mod_wsgi:
     #   mod_wsgi.Log object has no attribute isatty.
-    return getattr(fh, "isatty", None) and fh.isatty()
+    return getattr(fh, 'isatty', None) and fh.isatty()
 
 
 def cry():  # pragma: no cover
@@ -128,26 +128,26 @@ def cry():  # pragma: no cover
     # get a map of threads by their ID so we can print their names
     # during the traceback dump
     for t in threading.enumerate():
-        if getattr(t, "ident", None):
+        if getattr(t, 'ident', None):
             tmap[t.ident] = t
         else:
             main_thread = t
 
     out = StringIO()
-    sep = "=" * 49 + "\n"
+    sep = '=' * 49 + '\n'
     for tid, frame in sys._current_frames().iteritems():
         thread = tmap.get(tid, main_thread)
         if not thread:
             # skip old junk (left-overs from a fork)
             continue
-        out.write("%s\n" % (thread.getName(), ))
+        out.write('%s\n' % (thread.getName(), ))
         out.write(sep)
         traceback.print_stack(frame, file=out)
         out.write(sep)
-        out.write("LOCAL VARIABLES\n")
+        out.write('LOCAL VARIABLES\n')
         out.write(sep)
         pprint(frame.f_locals, stream=out)
-        out.write("\n\n")
+        out.write('\n\n')
     return out.getvalue()
 
 
@@ -163,14 +163,14 @@ def maybe_reraise():
         del(exc_info)
 
 
-def strtobool(term, table={"false": False, "no": False, "0": False,
-                             "true":  True, "yes": True,  "1": True,
-                             "on":    True, "off": False}):
+def strtobool(term, table={'false': False, 'no': False, '0': False,
+                             'true':  True, 'yes': True,  '1': True,
+                             'on':    True, 'off': False}):
     if isinstance(term, basestring):
         try:
             return table[term.lower()]
         except KeyError:
-            raise TypeError("Can't coerce %r to type bool" % (term, ))
+            raise TypeError('Cannot coerce %r to type bool' % (term, ))
     return term
 
 # ------------------------------------------------------------------------ #
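
As a quick illustration of `strtobool` above: string values are looked up
case-insensitively in the table, and non-string values pass through
unchanged::

    >>> strtobool('YES'), strtobool('off'), strtobool(True)
    (True, False, True)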

+ 3 - 3
celery/utils/compat.py

@@ -56,7 +56,7 @@ except ImportError:                         # pragma: no cover
     import itertools
 
     def zip_longest(*args, **kwds):  # noqa
-        fillvalue = kwds.get("fillvalue")
+        fillvalue = kwds.get('fillvalue')
 
         def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):
             yield counter()     # yields the fillvalue, or raises IndexError
@@ -81,7 +81,7 @@ def _compat_chain_from_iterable(iterables):  # pragma: no cover
             yield element
 
 try:
-    chain_from_iterable = getattr(chain, "from_iterable")
+    chain_from_iterable = getattr(chain, 'from_iterable')
 except AttributeError:   # pragma: no cover
     chain_from_iterable = _compat_chain_from_iterable
 
@@ -92,7 +92,7 @@ import os
 from stat import ST_DEV, ST_INO
 import platform as _platform
 
-if _platform.system() == "Windows":  # pragma: no cover
+if _platform.system() == 'Windows':  # pragma: no cover
     # Windows doesn't support WatchedFileHandler, so use FileHandler instead
     WatchedFileHandler = logging.FileHandler
 else:

+ 5 - 5
celery/utils/debug.py

@@ -39,16 +39,16 @@ def memdump(samples=10):
 
     """
     if ps() is None:
-        print("- rss: (psutil not installed).")
+        print('- rss: (psutil not installed).')
         return
     if filter(None, _mem_sample):
-        print("- rss (sample):")
+        print('- rss (sample):')
         for mem in sample(_mem_sample, samples):
-            print("-    > %s," % mem)
+            print('-    > %s,' % mem)
         _mem_sample[:] = []
     import gc
     gc.collect()
-    print("- rss (end): %s." % (mem_rss()))
+    print('- rss (end): %s.' % (mem_rss()))
 
 
 def sample(x, n, k=0):
@@ -70,7 +70,7 @@ def mem_rss():
     """Returns RSS memory usage as a humanized string."""
     p = ps()
     if p is not None:
-        return "%sMB" % (format_d(p.get_memory_info().rss // 1024), )
+        return '%sMB' % (format_d(p.get_memory_info().rss // 1024), )
 
 
 def ps():

+ 8 - 13
celery/utils/functional.py

@@ -91,7 +91,7 @@ class LRUCache(UserDict):
 
 def is_list(l):
     """Returns true if object is list-like, but not a dict or string."""
-    return hasattr(l, "__iter__") and not isinstance(l, (dict, basestring))
+    return hasattr(l, '__iter__') and not isinstance(l, (dict, basestring))
 
 
 def maybe_list(l):
@@ -219,13 +219,13 @@ def padlist(container, size, default=None):
 
     Examples:
 
-        >>> first, last, city = padlist(["George", "Costanza", "NYC"], 3)
-        ("George", "Costanza", "NYC")
-        >>> first, last, city = padlist(["George", "Costanza"], 3)
-        ("George", "Costanza", None)
-        >>> first, last, city, planet = padlist(["George", "Costanza",
-                                                 "NYC"], 4, default="Earth")
-        ("George", "Costanza", "NYC", "Earth")
+        >>> first, last, city = padlist(['George', 'Costanza', 'NYC'], 3)
+        ('George', 'Costanza', 'NYC')
+        >>> first, last, city = padlist(['George', 'Costanza'], 3)
+        ('George', 'Costanza', None)
+        >>> first, last, city, planet = padlist(['George', 'Costanza',
+                                                 'NYC'], 4, default='Earth')
+        ('George', 'Costanza', 'NYC', 'Earth')
 
     """
     return list(container)[:size] + [default] * (size - len(container))
@@ -238,11 +238,6 @@ def mattrgetter(*attrs):
                                 for attr in attrs)
 
 
-def _add(s, x):
-    print("ADD %r" % (x, ))
-    s.add(x)
-
-
 def uniq(it):
     """Returns all unique elements in ``it``, preserving order."""
     seen = set()
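
Judging from its docstring, `uniq` should behave like this (a lazy
generator is assumed here, hence the ``list()`` call)::

    >>> list(uniq([1, 2, 1, 3, 2]))
    [1, 2, 3]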

+ 8 - 8
celery/utils/imports.py

@@ -31,7 +31,7 @@ if sys.version_info >= (3, 3):  # pragma: no cover
 else:
 
     def qualname(obj):  # noqa
-        if not hasattr(obj, "__name__") and hasattr(obj, "__class__"):
+        if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
             return qualname(obj.__class__)
 
         return '.'.join([obj.__module__, obj.__name__])
@@ -59,11 +59,11 @@ def symbol_by_name(name, aliases={}, imp=None, package=None,
 
     Examples:
 
-        >>> symbol_by_name("celery.concurrency.processes.TaskPool")
+        >>> symbol_by_name('celery.concurrency.processes.TaskPool')
         <class 'celery.concurrency.processes.TaskPool'>
 
-        >>> symbol_by_name("default", {
-        ...     "default": "celery.concurrency.processes.TaskPool"})
+        >>> symbol_by_name('default', {
+        ...     'default': 'celery.concurrency.processes.TaskPool'})
         <class 'celery.concurrency.processes.TaskPool'>
 
         # Does not try to look up non-string names.
@@ -126,11 +126,11 @@ def find_module(module, path=None, imp=None):
     if imp is None:
         imp = importlib.import_module
     with cwd_in_path():
-        if "." in module:
+        if '.' in module:
             last = None
-            parts = module.split(".")
+            parts = module.split('.')
             for i, part in enumerate(parts[:-1]):
-                mpart = imp(".".join(parts[:i + 1]))
+                mpart = imp('.'.join(parts[:i + 1]))
                 try:
                     path = mpart.__path__
                 except AttributeError:
@@ -162,4 +162,4 @@ def reload_from_cwd(module, reloader=None):
 
 def module_file(module):
     name = module.__file__
-    return name[:-1] if name.endswith(".pyc") else name
+    return name[:-1] if name.endswith('.pyc') else name
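
A small sketch of `module_file` above, using a hypothetical module whose
``__file__`` points at compiled bytecode::

    >>> mod.__file__ = 'celery/utils/imports.pyc'  # hypothetical module
    >>> module_file(mod)
    'celery/utils/imports.py'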

+ 14 - 14
celery/utils/log.py

@@ -23,7 +23,7 @@ from .term import colored
 _process_aware = False
 is_py3k = sys.version_info[0] == 3
 
-MP_LOG = os.environ.get("MP_LOG", False)
+MP_LOG = os.environ.get('MP_LOG', False)
 
 
 # Sets up our logging hierarchy.
@@ -31,8 +31,8 @@ MP_LOG = os.environ.get("MP_LOG", False)
 # Every logger in the celery package inherits from the "celery"
 # logger, and every task logger inherits from the "celery.task"
 # logger.
-base_logger = logger = _get_logger("celery")
-mp_logger = _get_logger("multiprocessing")
+base_logger = logger = _get_logger('celery')
+mp_logger = _get_logger('multiprocessing')
 
 in_sighandler = False
 
@@ -47,7 +47,7 @@ def get_logger(name):
     if logging.root not in (l, l.parent) and l is not base_logger:
         l.parent = base_logger
     return l
-task_logger = get_logger("celery.task")
+task_logger = get_logger('celery.task')
 
 
 def get_task_logger(name):
@@ -66,8 +66,8 @@ def mlevel(level):
 class ColorFormatter(logging.Formatter):
     #: Loglevel -> Color mapping.
     COLORS = colored().names
-    colors = {"DEBUG": COLORS["blue"], "WARNING": COLORS["yellow"],
-              "ERROR": COLORS["red"], "CRITICAL": COLORS["magenta"]}
+    colors = {'DEBUG': COLORS['blue'], 'WARNING': COLORS['yellow'],
+              'ERROR': COLORS['red'], 'CRITICAL': COLORS['magenta']}
 
     def __init__(self, fmt=None, use_color=True):
         logging.Formatter.__init__(self, fmt)
@@ -87,16 +87,16 @@ class ColorFormatter(logging.Formatter):
             try:
                 record.msg = safe_str(str_t(color(record.msg)))
             except Exception, exc:
-                record.msg = "<Unrepresentable %r: %r>" % (
+                record.msg = '<Unrepresentable %r: %r>' % (
                         type(record.msg), exc)
                 record.exc_info = True
 
-        if not is_py3k and "processName" not in record.__dict__:
+        if not is_py3k and 'processName' not in record.__dict__:
             # Very ugly, but have to make sure processName is supported
             # by foreign logger instances.
             # (processName is always supported by Python 2.7)
-            process_name = current_process and current_process()._name or ""
-            record.__dict__["processName"] = process_name
+            process_name = current_process and current_process()._name or ''
+            record.__dict__['processName'] = process_name
         return safe_str(logging.Formatter.format(self, record))
 
 
@@ -107,7 +107,7 @@ class LoggingProxy(object):
     :param loglevel: Loglevel to use when writing messages.
 
     """
-    mode = "w"
+    mode = 'w'
     name = None
     closed = False
     loglevel = logging.ERROR
@@ -148,7 +148,7 @@ class LoggingProxy(object):
         """Write message to logging object."""
         if in_sighandler:
             return sys.__stderr__.write(safe_str(data))
-        if getattr(self._thread, "recurse_protection", False):
+        if getattr(self._thread, 'recurse_protection', False):
             # Logger is logging back to this file, so stop recursing.
             return
         data = data.strip()
@@ -217,7 +217,7 @@ def get_multiprocessing_logger():
 
 
 def reset_multiprocessing_logger():
-    if mputil and hasattr(mputil, "_logger"):
+    if mputil and hasattr(mputil, '_logger'):
         mputil._logger = None
 
 
@@ -234,7 +234,7 @@ def _patch_logger_class():
 
                 def log(self, *args, **kwargs):
                     if in_sighandler:
-                        sys.__stderr__.write("IN SIGHANDLER WON'T LOG")
+                        sys.__stderr__.write('CANNOT LOG IN SIGHANDLER')
                         return
                     return OldLoggerClass.log(self, *args, **kwargs)
             logging.setLoggerClass(SigSafeLogger)
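
Since `LoggingProxy` above is file-like (it has ``write`` and a ``mode``),
a standard stream can be redirected into a logger. A rough sketch, assuming
the constructor takes the target logger and an optional loglevel::

    import logging
    import sys

    from celery.utils.log import LoggingProxy, get_logger

    sys.stdout = LoggingProxy(get_logger(__name__), logging.INFO)
    print('captured')   # emitted as a log record instead of raw stdout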

+ 15 - 15
celery/utils/mail.py

@@ -28,7 +28,7 @@ class SendmailWarning(UserWarning):
 class Message(object):
 
     def __init__(self, to=None, sender=None, subject=None, body=None,
-            charset="us-ascii"):
+            charset='us-ascii'):
         self.to = maybe_list(to)
         self.sender = sender
         self.subject = subject
@@ -36,20 +36,20 @@ class Message(object):
         self.charset = charset
 
     def __repr__(self):
-        return "<Email: To:%r Subject:%r>" % (self.to, self.subject)
+        return '<Email: To:%r Subject:%r>' % (self.to, self.subject)
 
     def __str__(self):
-        msg = MIMEText(self.body, "plain", self.charset)
-        msg["Subject"] = self.subject
-        msg["From"] = self.sender
-        msg["To"] = ", ".join(self.to)
+        msg = MIMEText(self.body, 'plain', self.charset)
+        msg['Subject'] = self.subject
+        msg['From'] = self.sender
+        msg['To'] = ', '.join(self.to)
         return msg.as_string()
 
 
 class Mailer(object):
     supports_timeout = supports_timeout
 
-    def __init__(self, host="localhost", port=0, user=None, password=None,
+    def __init__(self, host='localhost', port=0, user=None, password=None,
             timeout=2, use_ssl=False, use_tls=False):
         self.host = host
         self.port = port
@@ -75,9 +75,9 @@ class Mailer(object):
             if not fail_silently:
                 raise
             warnings.warn(SendmailWarning(
-                "Mail could not be sent: %r %r\n%r" % (
-                    exc, {"To": ", ".join(message.to),
-                          "Subject": message.subject},
+                'Mail could not be sent: %r %r\n%r' % (
+                    exc, {'To': ', '.join(message.to),
+                          'Subject': message.subject},
                     traceback.format_stack())))
 
     def _send(self, message, **kwargs):
@@ -136,7 +136,7 @@ class ErrorMail(object):
 
     # pep8.py borks on an inline signature separator and
     # says "trailing whitespace" ;)
-    EMAIL_SIGNATURE_SEP = "-- "
+    EMAIL_SIGNATURE_SEP = '-- '
 
     #: Format string used to generate error email subjects.
     subject = """\
@@ -157,15 +157,15 @@ The contents of the full traceback was:
 %(EMAIL_SIGNATURE_SEP)s
 Just to let you know,
 py-celery at %%(hostname)s.
-""" % {"EMAIL_SIGNATURE_SEP": EMAIL_SIGNATURE_SEP}
+""" % {'EMAIL_SIGNATURE_SEP': EMAIL_SIGNATURE_SEP}
 
     error_whitelist = None
 
     def __init__(self, task, **kwargs):
         self.task = task
-        self.email_subject = kwargs.get("subject", self.subject)
-        self.email_body = kwargs.get("body", self.body)
-        self.error_whitelist = getattr(task, "error_whitelist")
+        self.email_subject = kwargs.get('subject', self.subject)
+        self.email_body = kwargs.get('body', self.body)
+        self.error_whitelist = getattr(task, 'error_whitelist')
 
     def should_send(self, context, exc):
         """Returns true or false depending on if a task error mail

+ 4 - 4
celery/utils/serialization.py

@@ -68,12 +68,12 @@ def find_nearest_pickleable_exception(exc):
 
     """
     cls = exc.__class__
-    getmro_ = getattr(cls, "mro", None)
+    getmro_ = getattr(cls, 'mro', None)
 
     # old-style classes don't have mro()
     if not getmro_:  # pragma: no cover
         # all Py2.4 exceptions have a baseclass.
-        if not getattr(cls, "__bases__", ()):
+        if not getattr(cls, '__bases__', ()):
             return
         # Use inspect.getmro() to traverse bases instead.
         getmro_ = lambda: inspect.getmro(cls)
@@ -84,7 +84,7 @@ def find_nearest_pickleable_exception(exc):
             # we don't care about these.
             return
         try:
-            exc_args = getattr(exc, "args", [])
+            exc_args = getattr(exc, 'args', [])
             superexc = supercls(*exc_args)
             pickle.dumps(superexc)
         except:
@@ -148,7 +148,7 @@ class UnpickleableExceptionWrapper(Exception):
     def from_exception(cls, exc):
         return cls(exc.__class__.__module__,
                    exc.__class__.__name__,
-                   getattr(exc, "args", []),
+                   getattr(exc, 'args', []),
                    safe_repr(exc))
 
 

+ 22 - 22
celery/utils/term.py

@@ -13,13 +13,13 @@ import platform
 from .encoding import safe_str
 
 BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
-OP_SEQ = "\033[%dm"
-RESET_SEQ = "\033[0m"
-COLOR_SEQ = "\033[1;%dm"
+OP_SEQ = '\033[%dm'
+RESET_SEQ = '\033[0m'
+COLOR_SEQ = '\033[1;%dm'
 fg = lambda s: COLOR_SEQ % s
 
 SYSTEM = platform.system()
-IS_WINDOWS = SYSTEM == "Windows"
+IS_WINDOWS = SYSTEM == 'Windows'
 
 
 class colored(object):
@@ -27,25 +27,25 @@ class colored(object):
 
     Example::
         >>> c = colored(enabled=True)
-        >>> print(str(c.red("the quick "), c.blue("brown ", c.bold("fox ")),
-        ...       c.magenta(c.underline("jumps over")),
-        ...       c.yellow(" the lazy "),
-        ...       c.green("dog ")))
+        >>> print(str(c.red('the quick '), c.blue('brown ', c.bold('fox ')),
+        ...       c.magenta(c.underline('jumps over')),
+        ...       c.yellow(' the lazy '),
+        ...       c.green('dog ')))
 
     """
 
     def __init__(self, *s, **kwargs):
         self.s = s
-        self.enabled = not IS_WINDOWS and kwargs.get("enabled", True)
-        self.op = kwargs.get("op", "")
-        self.names = {"black": self.black,
-                      "red": self.red,
-                      "green": self.green,
-                      "yellow": self.yellow,
-                      "blue": self.blue,
-                      "magenta": self.magenta,
-                      "cyan": self.cyan,
-                      "white": self.white}
+        self.enabled = not IS_WINDOWS and kwargs.get('enabled', True)
+        self.op = kwargs.get('op', '')
+        self.names = {'black': self.black,
+                      'red': self.red,
+                      'green': self.green,
+                      'yellow': self.yellow,
+                      'blue': self.blue,
+                      'magenta': self.magenta,
+                      'cyan': self.cyan,
+                      'white': self.white}
 
     def _add(self, a, b):
         return unicode(a) + unicode(b)
@@ -64,16 +64,16 @@ class colored(object):
     def no_color(self):
         if self.s:
             return reduce(self._fold_no_color, self.s)
-        return ""
+        return ''
 
     def embed(self):
-        prefix = ""
+        prefix = ''
         if self.enabled:
             prefix = self.op
         return prefix + safe_str(reduce(self._add, self.s))
 
     def __unicode__(self):
-        suffix = ""
+        suffix = ''
         if self.enabled:
             suffix = RESET_SEQ
         return safe_str(self.embed() + suffix)
@@ -148,7 +148,7 @@ class colored(object):
         return self.node(s, fg(40 + WHITE))
 
     def reset(self, *s):
-        return self.node(s or [""], RESET_SEQ)
+        return self.node(s or [''], RESET_SEQ)
 
     def __add__(self, other):
         return unicode(self) + unicode(other)

+ 11 - 11
celery/utils/text.py

@@ -22,7 +22,7 @@ def dedent(s, n=4):
 
 
 def fill_paragraphs(s, width):
-    return '\n'.join(textwrap.fill(p, width) for p in s.split("\n"))
+    return '\n'.join(textwrap.fill(p, width) for p in s.split('\n'))
 
 
 def join(l):
@@ -31,13 +31,13 @@ def join(l):
 
 def ensure_2lines(s):
     if len(s.splitlines()) <= 2:
-        return s + "\n"
+        return s + '\n'
     return s
 
 
-def abbr(S, max, ellipsis="..."):
+def abbr(S, max, ellipsis='...'):
     if S is None:
-        return "???"
+        return '???'
     if len(S) > max:
         return ellipsis and (S[:max - len(ellipsis)] + ellipsis) or S[:max]
     return S
@@ -45,23 +45,23 @@ def abbr(S, max, ellipsis="..."):
 
 def abbrtask(S, max):
     if S is None:
-        return "???"
+        return '???'
     if len(S) > max:
-        module, _, cls = S.rpartition(".")
+        module, _, cls = S.rpartition('.')
         module = abbr(module, max - len(cls) - 3, False)
-        return module + "[.]" + cls
+        return module + '[.]' + cls
     return S
 
 
 def indent(t, indent=0):
     """Indent text."""
-    return "\n".join(" " * indent + p for p in t.split("\n"))
+    return '\n'.join(' ' * indent + p for p in t.split('\n'))
 
 
 def truncate(text, maxlen=128, suffix='...'):
     """Truncates text to a maximum number of characters."""
     if len(text) >= maxlen:
-        return text[:maxlen].rsplit(" ", 1)[0] + suffix
+        return text[:maxlen].rsplit(' ', 1)[0] + suffix
     return text
 
 
@@ -73,8 +73,8 @@ def pluralize(n, text, suffix='s'):
 
 def pretty(value, width=80, nl_width=80, **kw):
     if isinstance(value, dict):
-        return "{\n %s" % (pformat(value, 4, nl_width)[1:])
+        return '{\n %s' % (pformat(value, 4, nl_width)[1:])
     elif isinstance(value, tuple):
-        return "\n%s%s" % (' ' * 4, pformat(value, width=nl_width, **kw))
+        return '\n%s%s' % (' ' * 4, pformat(value, width=nl_width, **kw))
     else:
         return pformat(value, width=width, **kw)
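
The abbreviation helpers above in action::

    >>> abbr('celery.worker.consumer', 12)
    'celery.wo...'
    >>> abbrtask('celery.worker.consumer.Consumer', 16)
    'celer[.]Consumer'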

+ 9 - 9
celery/utils/threads.py

@@ -18,25 +18,25 @@ from kombu.syn import detect_environment
 _Thread = threading.Thread
 _Event = threading._Event
 
-active_count = (getattr(threading, "active_count", None) or
+active_count = (getattr(threading, 'active_count', None) or
                 threading.activeCount)
 
 
 class Event(_Event):
 
-    if not hasattr(_Event, "is_set"):     # pragma: no cover
+    if not hasattr(_Event, 'is_set'):     # pragma: no cover
         is_set = _Event.isSet
 
 
 class Thread(_Thread):
 
-    if not hasattr(_Thread, "is_alive"):  # pragma: no cover
+    if not hasattr(_Thread, 'is_alive'):  # pragma: no cover
         is_alive = _Thread.isAlive
 
-    if not hasattr(_Thread, "daemon"):    # pragma: no cover
+    if not hasattr(_Thread, 'daemon'):    # pragma: no cover
         daemon = property(_Thread.isDaemon, _Thread.setDaemon)
 
-    if not hasattr(_Thread, "name"):      # pragma: no cover
+    if not hasattr(_Thread, 'name'):      # pragma: no cover
         name = property(_Thread.getName, _Thread.setName)
 
 
@@ -50,10 +50,10 @@ class bgThread(Thread):
         self.name = name or self.__class__.__name__
 
     def body(self):
-        raise NotImplementedError("subclass responsibility")
+        raise NotImplementedError('subclass responsibility')
 
     def on_crash(self, msg, *fmt, **kwargs):
-        sys.stderr.write((msg + "\n") % fmt)
+        sys.stderr.write((msg + '\n') % fmt)
         exc_info = sys.exc_info()
         try:
             traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
@@ -70,7 +70,7 @@ class bgThread(Thread):
                     body()
                 except Exception, exc:
                     try:
-                        self.on_crash("%r crashed: %r", self.name, exc)
+                        self.on_crash('%r crashed: %r', self.name, exc)
                         self._set_stopped()
                     finally:
                         os._exit(1)  # exiting by normal means won't work
@@ -92,7 +92,7 @@ class bgThread(Thread):
         if self.is_alive():
             self.join(1e100)
 
-if detect_environment() == "default":
+if detect_environment() == 'default':
     class LocalStack(threading.local):
 
         def __init__(self):

+ 19 - 19
celery/utils/timer2.py

@@ -23,16 +23,16 @@ from datetime import datetime, timedelta
 from kombu.log import get_logger
 
 VERSION = (1, 0, 0)
-__version__ = ".".join(map(str, VERSION))
-__author__ = "Ask Solem"
-__contact__ = "ask@celeryproject.org"
-__homepage__ = "http://github.com/ask/timer2/"
-__docformat__ = "restructuredtext"
+__version__ = '.'.join(map(str, VERSION))
+__author__ = 'Ask Solem'
+__contact__ = 'ask@celeryproject.org'
+__homepage__ = 'http://github.com/ask/timer2/'
+__docformat__ = 'restructuredtext'
 
 DEFAULT_MAX_INTERVAL = 2
-TIMER_DEBUG = os.environ.get("TIMER_DEBUG")
+TIMER_DEBUG = os.environ.get('TIMER_DEBUG')
 
-logger = get_logger("timer2")
+logger = get_logger('timer2')
 
 
 class Entry(object):
@@ -51,13 +51,13 @@ class Entry(object):
         self.tref.cancelled = True
 
     def __repr__(self):
-        return "<TimerEntry: %s(*%r, **%r)" % (
+        return '<TimerEntry: %s(*%r, **%r)' % (
                 self.fun.__name__, self.args, self.kwargs)
 
     if sys.version_info[0] == 3:  # pragma: no cover
 
         def __hash__(self):
-            return hash("|".join(map(repr, (self.fun, self.args,
+            return hash('|'.join(map(repr, (self.fun, self.args,
                                             self.kwargs))))
 
         def __lt__(self, other):
@@ -92,7 +92,7 @@ class Schedule(object):
             entry()
         except Exception, exc:
             if not self.handle_error(exc):
-                logger.error("Error in timer: %r", exc, exc_info=True)
+                logger.error('Error in timer: %r', exc, exc_info=True)
 
     def handle_error(self, exc_info):
         if self.on_error:
@@ -194,7 +194,7 @@ class Schedule(object):
                              # and the operation is atomic.
 
     def info(self):
-        return ({"eta": eta, "priority": priority, "item": item}
+        return ({'eta': eta, 'priority': priority, 'item': item}
                     for eta, priority, item in self.queue)
 
     def cancel(self, tref):
@@ -217,7 +217,7 @@ class Timer(Thread):
     if TIMER_DEBUG:  # pragma: no cover
         def start(self, *args, **kwargs):
             import traceback
-            print("- Timer starting")
+            print('- Timer starting')
             traceback.print_stack()
             super(Timer, self).start(*args, **kwargs)
 
@@ -233,7 +233,7 @@ class Timer(Thread):
         self.mutex = Lock()
         self.not_empty = Condition(self.mutex)
         self.setDaemon(True)
-        self.setName("Timer-%s" % (self._timer_count(), ))
+        self.setName('Timer-%s' % (self._timer_count(), ))
 
     def _next_entry(self):
         with self.not_empty:
@@ -265,7 +265,7 @@ class Timer(Thread):
                 # so gc collected built-in modules.
                 pass
         except Exception, exc:
-            logger.error("Thread Timer crashed: %r", exc, exc_info=True)
+            logger.error('Thread Timer crashed: %r', exc, exc_info=True)
             os._exit(1)
 
     def stop(self):
@@ -287,19 +287,19 @@ class Timer(Thread):
             return entry
 
     def enter(self, entry, eta, priority=None):
-        return self._do_enter("enter", entry, eta, priority=priority)
+        return self._do_enter('enter', entry, eta, priority=priority)
 
     def apply_at(self, *args, **kwargs):
-        return self._do_enter("apply_at", *args, **kwargs)
+        return self._do_enter('apply_at', *args, **kwargs)
 
     def enter_after(self, *args, **kwargs):
-        return self._do_enter("enter_after", *args, **kwargs)
+        return self._do_enter('enter_after', *args, **kwargs)
 
     def apply_after(self, *args, **kwargs):
-        return self._do_enter("apply_after", *args, **kwargs)
+        return self._do_enter('apply_after', *args, **kwargs)
 
     def apply_interval(self, *args, **kwargs):
-        return self._do_enter("apply_interval", *args, **kwargs)
+        return self._do_enter('apply_interval', *args, **kwargs)
 
     def exit_after(self, msecs, priority=10):
         self.apply_after(msecs, sys.exit, priority)
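
Typical use of the `Timer` above; as the ``msecs`` argument to `exit_after`
suggests, intervals are given in milliseconds (``my_callback`` is a
placeholder)::

    timer = Timer()
    timer.apply_after(1000, my_callback)             # run once after ~1s
    tref = timer.apply_interval(5000, my_callback)   # run every ~5s
    tref.cancel()                                    # stop the interval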

+ 19 - 19
celery/utils/timeutils.py

@@ -24,20 +24,20 @@ except ImportError:     # pragma: no cover
     pytz = None         # noqa
 
 
-DAYNAMES = "sun", "mon", "tue", "wed", "thu", "fri", "sat"
+DAYNAMES = 'sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'
 WEEKDAYS = dict((name, dow) for name, dow in zip(DAYNAMES, range(7)))
 
-RATE_MODIFIER_MAP = {"s": lambda n: n,
-                     "m": lambda n: n / 60.0,
-                     "h": lambda n: n / 60.0 / 60.0}
+RATE_MODIFIER_MAP = {'s': lambda n: n,
+                     'm': lambda n: n / 60.0,
+                     'h': lambda n: n / 60.0 / 60.0}
 
 
-HAVE_TIMEDELTA_TOTAL_SECONDS = hasattr(timedelta, "total_seconds")
+HAVE_TIMEDELTA_TOTAL_SECONDS = hasattr(timedelta, 'total_seconds')
 
-TIME_UNITS = (("day", 60 * 60 * 24.0, lambda n: "%.2f" % n),
-              ("hour", 60 * 60.0, lambda n: "%.2f" % n),
-              ("minute", 60.0, lambda n: "%.2f" % n),
-              ("second", 1.0, lambda n: "%.2f" % n))
+TIME_UNITS = (('day', 60 * 60 * 24.0, lambda n: '%.2f' % n),
+              ('hour', 60 * 60.0, lambda n: '%.2f' % n),
+              ('minute', 60.0, lambda n: '%.2f' % n),
+              ('second', 1.0, lambda n: '%.2f' % n))
 
 
 class _Zone(object):
@@ -54,10 +54,10 @@ class _Zone(object):
     def get_timezone(self, zone):
         if isinstance(zone, basestring):
             if pytz is None:
-                if zone == "UTC":
-                    return tz.gettz("UTC")
+                if zone == 'UTC':
+                    return tz.gettz('UTC')
                 raise ImproperlyConfigured(
-                    "Timezones requires the pytz library")
+                    'Timezones requires the pytz library')
             return pytz.timezone(zone)
         return zone
 
@@ -67,7 +67,7 @@ class _Zone(object):
 
     @cached_property
     def utc(self):
-        return self.get_timezone("UTC")
+        return self.get_timezone('UTC')
 timezone = _Zone()
 
 
@@ -150,8 +150,8 @@ def rate(rate):
     and converts them to seconds."""
     if rate:
         if isinstance(rate, basestring):
-            ops, _, modifier = rate.partition("/")
-            return RATE_MODIFIER_MAP[modifier or "s"](int(ops)) or 0
+            ops, _, modifier = rate.partition('/')
+            return RATE_MODIFIER_MAP[modifier or 's'](int(ops)) or 0
         return rate or 0
     return 0
 
@@ -161,7 +161,7 @@ def weekday(name):
 
     Example::
 
-        >>> weekday("sunday"), weekday("sun"), weekday("mon")
+        >>> weekday('sunday'), weekday('sun'), weekday('mon')
         (0, 0, 1)
 
     """
@@ -173,16 +173,16 @@ def weekday(name):
         raise KeyError(name)
 
 
-def humanize_seconds(secs, prefix=""):
+def humanize_seconds(secs, prefix=''):
     """Show seconds in human form, e.g. 60 is "1 minute", 7200 is "2
     hours"."""
     secs = float(secs)
     for unit, divider, formatter in TIME_UNITS:
         if secs >= divider:
             w = secs / divider
-            return "%s%s %s" % (prefix, formatter(w),
+            return '%s%s %s' % (prefix, formatter(w),
                                 pluralize(w, unit))
-    return "now"
+    return 'now'
 
 
 def maybe_iso8601(dt):
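
The conversion helpers above, in doctest form::

    >>> rate('100/m')          # ops per minute, normalized to ops/sec
    1.6666666666666667
    >>> humanize_seconds(7200)
    '2.00 hours'
    >>> humanize_seconds(0)
    'now'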

+ 38 - 38
celery/worker/__init__.py

@@ -56,11 +56,11 @@ class Namespace(abstract.Namespace):
     own set of built-in boot-step modules.
 
     """
-    name = "worker"
-    builtin_boot_steps = ("celery.worker.autoscale",
-                          "celery.worker.autoreload",
-                          "celery.worker.consumer",
-                          "celery.worker.mediator")
+    name = 'worker'
+    builtin_boot_steps = ('celery.worker.autoscale',
+                          'celery.worker.autoreload',
+                          'celery.worker.consumer',
+                          'celery.worker.mediator')
 
     def modules(self):
         return (self.builtin_boot_steps
@@ -81,8 +81,8 @@ class Pool(abstract.StartStopComponent):
         * min_concurrency
 
     """
-    name = "worker.pool"
-    requires = ("queues", )
+    name = 'worker.pool'
+    requires = ('queues', )
 
     def __init__(self, w, autoscale=None, no_execv=False, **kwargs):
         w.autoscale = autoscale
@@ -104,7 +104,7 @@ class Pool(abstract.StartStopComponent):
         now = time.time
 
         if not pool.did_start_ok():
-            raise WorkerLostError("Could not start worker processes")
+            raise WorkerLostError('Could not start worker processes')
 
         hub.update_readers(pool.readers)
         for handler, interval in pool.timers.iteritems():
@@ -126,7 +126,7 @@ class Pool(abstract.StartStopComponent):
         def on_timeout_cancel(result):
             try:
                 result._tref.cancel()
-                delattr(result, "_tref")
+                delattr(result, '_tref')
             except AttributeError:
                 pass
 
@@ -166,7 +166,7 @@ class Beat(abstract.StartStopComponent):
     argument is set.
 
     """
-    name = "worker.beat"
+    name = 'worker.beat'
 
     def __init__(self, w, beat=False, **kwargs):
         self.enabled = w.beat = beat
@@ -183,8 +183,8 @@ class Beat(abstract.StartStopComponent):
 class Queues(abstract.Component):
     """This component initializes the internal queues
     used by the worker."""
-    name = "worker.queues"
-    requires = ("ev", )
+    name = 'worker.queues'
+    requires = ('ev', )
 
     def create(self, w):
         w.start_mediator = True
@@ -207,7 +207,7 @@ class Queues(abstract.Component):
 
 
 class EvLoop(abstract.StartStopComponent):
-    name = "worker.ev"
+    name = 'worker.ev'
 
     def __init__(self, w, **kwargs):
         w.hub = None
@@ -223,8 +223,8 @@ class EvLoop(abstract.StartStopComponent):
 
 class Timers(abstract.Component):
     """This component initializes the internal timers used by the worker."""
-    name = "worker.timers"
-    requires = ("pool", )
+    name = 'worker.timers'
+    requires = ('pool', )
 
     def include_if(self, w):
         return not w.use_eventloop
@@ -240,15 +240,15 @@ class Timers(abstract.Component):
                                    on_timer_tick=self.on_timer_tick)
 
     def on_timer_error(self, exc):
-        logger.error("Timer error: %r", exc, exc_info=True)
+        logger.error('Timer error: %r', exc, exc_info=True)
 
     def on_timer_tick(self, delay):
-        logger.debug("Timer wake-up! Next eta %s secs.", delay)
+        logger.debug('Timer wake-up! Next eta %s secs.', delay)
 
 
 class StateDB(abstract.Component):
     """This component sets up the workers state db if enabled."""
-    name = "worker.state-db"
+    name = 'worker.state-db'
 
     def __init__(self, w, **kwargs):
         self.enabled = w.state_db
@@ -268,17 +268,17 @@ class WorkController(configurated):
     app = None
     concurrency = from_config()
     loglevel = logging.ERROR
-    logfile = from_config("log_file")
+    logfile = from_config('log_file')
     send_events = from_config()
-    pool_cls = from_config("pool")
-    consumer_cls = from_config("consumer")
-    mediator_cls = from_config("mediator")
-    timer_cls = from_config("timer")
-    timer_precision = from_config("timer_precision")
-    autoscaler_cls = from_config("autoscaler")
-    autoreloader_cls = from_config("autoreloader")
+    pool_cls = from_config('pool')
+    consumer_cls = from_config('consumer')
+    mediator_cls = from_config('mediator')
+    timer_cls = from_config('timer')
+    timer_precision = from_config('timer_precision')
+    autoscaler_cls = from_config('autoscaler')
+    autoreloader_cls = from_config('autoreloader')
     schedule_filename = from_config()
-    scheduler_cls = from_config("celerybeat_scheduler")
+    scheduler_cls = from_config('celerybeat_scheduler')
     task_time_limit = from_config()
     task_soft_time_limit = from_config()
     max_tasks_per_child = from_config()
@@ -306,7 +306,7 @@ class WorkController(configurated):
         trace._tasks = self.app._tasks
 
         self._shutdown_complete = Event()
-        self.setup_defaults(kwargs, namespace="celeryd")
+        self.setup_defaults(kwargs, namespace='celeryd')
         self.app.select_queues(queues)  # select queues subset.
 
         # Options
@@ -316,7 +316,7 @@ class WorkController(configurated):
         self._finalize = Finalize(self, self.stop, exitpriority=1)
         self.pidfile = pidfile
         self.pidlock = None
-        self.use_eventloop = (detect_environment() == "default" and
+        self.use_eventloop = (detect_environment() == 'default' and
                               self.app.broker_connection().is_evented and
                               not self.app.IS_WINDOWS)
 
@@ -340,15 +340,15 @@ class WorkController(configurated):
             self.pidlock = platforms.create_pidlock(self.pidfile)
         try:
             for i, component in enumerate(self.components):
-                logger.debug("Starting %s...", qualname(component))
+                logger.debug('Starting %s...', qualname(component))
                 self._running = i + 1
                 if component:
                     component.start()
-                logger.debug("%s OK!", qualname(component))
+                logger.debug('%s OK!', qualname(component))
         except SystemTerminate:
             self.terminate()
         except Exception, exc:
-            logger.error("Unrecoverable error: %r", exc,
+            logger.error('Unrecoverable error: %r', exc,
                          exc_info=True)
             self.stop()
         except (KeyboardInterrupt, SystemExit):
@@ -366,7 +366,7 @@ class WorkController(configurated):
         try:
             req.execute_using_pool(self.pool)
         except Exception, exc:
-            logger.critical("Internal error: %r\n%s",
+            logger.critical('Internal error: %r\n%s',
                             exc, traceback.format_exc(), exc_info=True)
         except SystemTerminate:
             self.terminate()
@@ -394,7 +394,7 @@ class WorkController(configurated):
             self._shutdown(warm=False)
 
     def _shutdown(self, warm=True):
-        what = "Stopping" if warm else "Terminating"
+        what = 'Stopping' if warm else 'Terminating'
 
         if self._state in (self.CLOSE, self.TERMINATE):
             return
@@ -410,11 +410,11 @@ class WorkController(configurated):
         self._state = self.CLOSE
 
         for component in reversed(self.components):
-            logger.debug("%s %s...", what, qualname(component))
+            logger.debug('%s %s...', what, qualname(component))
             if component:
                 stop = component.stop
                 if not warm:
-                    stop = getattr(component, "terminate", None) or stop
+                    stop = getattr(component, 'terminate', None) or stop
                 stop()
 
         self.timer.stop()
@@ -431,10 +431,10 @@ class WorkController(configurated):
 
         for module in set(modules or ()):
             if module not in sys.modules:
-                logger.debug("importing module %s", module)
+                logger.debug('importing module %s', module)
                 imp(module)
             elif reload:
-                logger.debug("reloading module %s", module)
+                logger.debug('reloading module %s', module)
                 reload_from_cwd(sys.modules[module], reloader)
         self.pool.restart()
 

+ 10 - 10
celery/worker/abstract.py

@@ -60,14 +60,14 @@ class Namespace(object):
         will also be added to the object's ``components`` attribute.
 
         """
-        self._debug("Loading modules.")
+        self._debug('Loading modules.')
         self.load_modules()
-        self._debug("Claiming components.")
+        self._debug('Claiming components.')
         self.components = self._claim()
-        self._debug("Building boot step graph.")
+        self._debug('Building boot step graph.')
         self.boot_steps = [self.bind_component(name, parent, **kwargs)
                                 for name in self._finalize_boot_steps()]
-        self._debug("New boot order: {%s}",
+        self._debug('New boot order: {%s}',
                 ', '.join(c.name for c in self.boot_steps))
 
         for component in self.boot_steps:
@@ -105,7 +105,7 @@ class Namespace(object):
         return self._unclaimed[self.name]
 
     def _debug(self, msg, *args):
-        return logger.debug("[%s] " + msg,
+        return logger.debug('[%s] ' + msg,
                             *(self.name.capitalize(), ) + args)
 
 
@@ -113,15 +113,15 @@ class ComponentType(type):
     """Metaclass for components."""
 
     def __new__(cls, name, bases, attrs):
-        abstract = attrs.pop("abstract", False)
+        abstract = attrs.pop('abstract', False)
         if not abstract:
             try:
-                cname = attrs["name"]
+                cname = attrs['name']
             except KeyError:
-                raise NotImplementedError("Components must be named")
-            namespace = attrs.get("namespace", None)
+                raise NotImplementedError('Components must be named')
+            namespace = attrs.get('namespace', None)
             if not namespace:
-                attrs["namespace"], _, attrs["name"] = cname.partition('.')
+                attrs['namespace'], _, attrs['name'] = cname.partition('.')
         cls = super(ComponentType, cls).__new__(cls, name, bases, attrs)
         if not abstract:
             Namespace._unclaimed[cls.namespace][cls.name] = cls
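
Given `ComponentType` above, a concrete component only declares a dotted
``name`` and the namespace is partitioned off automatically. A minimal
sketch of a worker boot step::

    class ExampleComponent(StartStopComponent):
        name = 'worker.example'   # namespace='worker', name='example'
        requires = ('queues', )

        def create(self, w):
            # return the object whose start()/stop() the worker manages
            return None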

+ 16 - 16
celery/worker/autoreload.py

@@ -37,8 +37,8 @@ logger = get_logger(__name__)
 
 
 class WorkerComponent(StartStopComponent):
-    name = "worker.autoreloader"
-    requires = ("pool", )
+    name = 'worker.autoreloader'
+    requires = ('pool', )
 
     def __init__(self, w, autoreload=None, **kwargs):
         self.enabled = w.autoreload = autoreload
@@ -54,14 +54,14 @@ class WorkerComponent(StartStopComponent):
         return w.autoreloader
 
     def create(self, w):
-        if hasattr(select, "kqueue") and w.use_eventloop:
+        if hasattr(select, 'kqueue') and w.use_eventloop:
             return self.create_ev(w)
         return self.create_threaded(w)
 
 
-def file_hash(filename, algorithm="md5"):
+def file_hash(filename, algorithm='md5'):
     hobj = hashlib.new(algorithm)
-    with open(filename, "rb") as f:
+    with open(filename, 'rb') as f:
         for chunk in iter(lambda: f.read(2 ** 20), ''):
             hobj.update(chunk)
     return hobj.digest()
@@ -78,7 +78,7 @@ class BaseMonitor(object):
         self.shutdown_event = shutdown_event or Event()
 
     def start(self):
-        raise NotImplementedError("Subclass responsibility")
+        raise NotImplementedError('Subclass responsibility')
 
     def stop(self):
         pass
@@ -199,18 +199,18 @@ class InotifyMonitor(_ProcessEvent):
 
 def default_implementation():
     # kqueue monitor not working properly at this time.
-    if hasattr(select, "kqueue"):
-        return "kqueue"
-    if sys.platform.startswith("linux") and pyinotify:
-        return "inotify"
+    if hasattr(select, 'kqueue'):
+        return 'kqueue'
+    if sys.platform.startswith('linux') and pyinotify:
+        return 'inotify'
     else:
-        return "stat"
+        return 'stat'
 
-implementations = {"kqueue": KQueueMonitor,
-                   "inotify": InotifyMonitor,
-                   "stat": StatMonitor}
+implementations = {'kqueue': KQueueMonitor,
+                   'inotify': InotifyMonitor,
+                   'stat': StatMonitor}
 Monitor = implementations[
-            os.environ.get("CELERYD_FSNOTIFY") or default_implementation()]
+            os.environ.get('CELERYD_FSNOTIFY') or default_implementation()]
 
 
 class Autoreloader(bgThread):
@@ -260,7 +260,7 @@ class Autoreloader(bgThread):
         modified = [f for f in files if self._maybe_modified(f)]
         if modified:
             names = [self._module_name(module) for module in modified]
-            logger.info("Detected modified modules: %r", names)
+            logger.info('Detected modified modules: %r', names)
             self._reload(names)
 
     def _reload(self, modules):

+ 10 - 10
celery/worker/autoscale.py

@@ -31,8 +31,8 @@ debug, info, error = logger.debug, logger.info, logger.error
 
 
 class WorkerComponent(StartStopComponent):
-    name = "worker.autoscaler"
-    requires = ("pool", )
+    name = 'worker.autoscaler'
+    requires = ('pool', )
 
     def __init__(self, w, **kwargs):
         self.enabled = w.autoscale
@@ -70,7 +70,7 @@ class Autoscaler(bgThread):
         self.keepalive = keepalive
         self._last_action = None
 
-        assert self.keepalive, "can't scale down too fast."
+        assert self.keepalive, 'cannot scale down too fast.'
 
     def body(self):
         with self.mutex:
@@ -129,23 +129,23 @@ class Autoscaler(bgThread):
             return self._shrink(n)
 
     def _grow(self, n):
-        info("Scaling up %s processes.", n)
+        info('Scaling up %s processes.', n)
         self.pool.grow(n)
 
     def _shrink(self, n):
-        info("Scaling down %s processes.", n)
+        info('Scaling down %s processes.', n)
         try:
             self.pool.shrink(n)
         except ValueError:
             debug("Autoscaler won't scale down: all processes busy.")
         except Exception, exc:
-            error("Autoscaler: scale_down: %r", exc, exc_info=True)
+            error('Autoscaler: scale_down: %r', exc, exc_info=True)
 
     def info(self):
-        return {"max": self.max_concurrency,
-                "min": self.min_concurrency,
-                "current": self.processes,
-                "qty": self.qty}
+        return {'max': self.max_concurrency,
+                'min': self.min_concurrency,
+                'current': self.processes,
+                'qty': self.qty}
 
     @property
     def qty(self):

+ 4 - 4
celery/worker/buckets.py

@@ -45,9 +45,9 @@ class TaskBucket(object):
     `feed.refresh` and `video.compress`, the TaskBucket will consist
     of the following items::
 
-        {"twitter.update": TokenBucketQueue(fill_rate=300),
-         "feed.refresh": Queue(),
-         "video.compress": TokenBucketQueue(fill_rate=2)}
+        {'twitter.update': TokenBucketQueue(fill_rate=300),
+         'feed.refresh': Queue(),
+         'video.compress': TokenBucketQueue(fill_rate=2)}
 
     The get operation will iterate over these until one of the buckets
     is able to return an item.  The underlying datastructure is a `dict`,
@@ -172,7 +172,7 @@ class TaskBucket(object):
 
     def update_bucket_for_type(self, task_name):
         task_type = self.task_registry[task_name]
-        rate_limit = getattr(task_type, "rate_limit", None)
+        rate_limit = getattr(task_type, 'rate_limit', None)
         rate_limit = timeutils.rate(rate_limit)
         task_queue = FastQueue()
         if task_name in self.buckets:

+ 28 - 28
celery/worker/consumer.py

@@ -151,11 +151,11 @@ info, warn, error, crit = (logger.info, logger.warn,
 
 
 def debug(msg, *args, **kwargs):
-    logger.debug("Consumer: %s" % (msg, ), *args, **kwargs)
+    logger.debug('Consumer: %s' % (msg, ), *args, **kwargs)
 
 
 class Component(StartStopComponent):
-    name = "worker.consumer"
+    name = 'worker.consumer'
     last = True
 
     def Consumer(self, w):
@@ -230,10 +230,10 @@ class QoS(object):
         if pcount != self.prev:
             new_value = pcount
             if pcount > PREFETCH_COUNT_MAX:
-                warn("QoS: Disabled: prefetch_count exceeds %r",
+                warn('QoS: Disabled: prefetch_count exceeds %r',
                      PREFETCH_COUNT_MAX)
                 new_value = 0
-            debug("basic.qos: prefetch_count->%s", new_value)
+            debug('basic.qos: prefetch_count->%s', new_value)
             self.consumer.qos(prefetch_count=new_value)
             self.prev = pcount
         return pcount
@@ -392,7 +392,7 @@ class Consumer(object):
                 if on_task_callbacks:
                     [callback() for callback in on_task_callbacks]
                 try:
-                    name = body["task"]
+                    name = body['task']
                 except (KeyError, TypeError):
                     return self.handle_unknown_message(body, message)
                 try:
@@ -405,7 +405,7 @@ class Consumer(object):
             self.task_consumer.callbacks = [on_task_received]
             self.task_consumer.consume()
 
-            debug("Ready to accept tasks!")
+            debug('Ready to accept tasks!')
 
             while self._state != CLOSE and self.connection:
                 # shutdown if signal handlers told us to.
@@ -458,13 +458,13 @@ class Consumer(object):
             return
 
         if self._does_info:
-            info("Got task from broker: %s", task.shortinfo())
+            info('Got task from broker: %s', task.shortinfo())
 
         if self.event_dispatcher.enabled:
-            self.event_dispatcher.send("task-received", uuid=task.id,
+            self.event_dispatcher.send('task-received', uuid=task.id,
                     name=task.name, args=safe_repr(task.args),
                     kwargs=safe_repr(task.kwargs),
-                    retries=task.request_dict.get("retries", 0),
+                    retries=task.request_dict.get('retries', 0),
                     eta=task.eta and task.eta.isoformat(),
                     expires=task.expires and task.expires.isoformat())
 
@@ -488,9 +488,9 @@ class Consumer(object):
         try:
             self.pidbox_node.handle_message(body, message)
         except KeyError, exc:
-            error("No such control command: %s", exc)
+            error('No such control command: %s', exc)
         except Exception, exc:
-            error("Control command error: %r", exc, exc_info=True)
+            error('Control command error: %r', exc, exc_info=True)
             self.reset_pidbox_node()
 
     def apply_eta_task(self, task):
@@ -526,7 +526,7 @@ class Consumer(object):
 
         """
         try:
-            name = body["task"]
+            name = body['task']
         except (KeyError, TypeError):
             return self.handle_unknown_message(body, message)
 
@@ -555,14 +555,14 @@ class Consumer(object):
         connection, self.connection = self.connection, None
 
         if self.task_consumer:
-            debug("Closing consumer channel...")
+            debug('Closing consumer channel...')
             self.task_consumer = \
                     self.maybe_conn_error(self.task_consumer.close)
 
         self.stop_pidbox_node()
 
         if connection:
-            debug("Closing broker connection...")
+            debug('Closing broker connection...')
             self.maybe_conn_error(connection.close)
 
     def stop_consumers(self, close_connection=True):
@@ -578,19 +578,19 @@ class Consumer(object):
 
         if self.heart:
             # Stop the heartbeat thread if it's running.
-            debug("Heart: Going into cardiac arrest...")
+            debug('Heart: Going into cardiac arrest...')
             self.heart = self.heart.stop()
 
-        debug("Cancelling task consumer...")
+        debug('Cancelling task consumer...')
         if self.task_consumer:
             self.maybe_conn_error(self.task_consumer.cancel)
 
         if self.event_dispatcher:
-            debug("Shutting down event dispatcher...")
+            debug('Shutting down event dispatcher...')
             self.event_dispatcher = \
                     self.maybe_conn_error(self.event_dispatcher.close)
 
-        debug("Cancelling broadcast consumer...")
+        debug('Cancelling broadcast consumer...')
         if self.broadcast_consumer:
             self.maybe_conn_error(self.broadcast_consumer.cancel)
 
@@ -633,11 +633,11 @@ class Consumer(object):
     def stop_pidbox_node(self):
         if self._pidbox_node_stopped:
             self._pidbox_node_shutdown.set()
-            debug("Waiting for broadcast thread to shutdown...")
+            debug('Waiting for broadcast thread to shutdown...')
             self._pidbox_node_stopped.wait()
             self._pidbox_node_stopped = self._pidbox_node_shutdown = None
         elif self.broadcast_consumer:
-            debug("Closing broadcast channel...")
+            debug('Closing broadcast channel...')
             self.broadcast_consumer = \
                 self.maybe_conn_error(self.broadcast_consumer.channel.close)
 
@@ -665,7 +665,7 @@ class Consumer(object):
     def reset_connection(self):
         """Re-establish the broker connection and set up consumers,
         heartbeat and the event dispatcher."""
-        debug("Re-establishing connection to the broker...")
+        debug('Re-establishing connection to the broker...')
         self.stop_consumers()
 
         # Clear internal queues to get rid of old messages.
@@ -676,7 +676,7 @@ class Consumer(object):
 
         # Re-establish the broker connection and setup the task consumer.
         self.connection = self._open_connection()
-        debug("Connection established.")
+        debug('Connection established.')
         self.task_consumer = self.app.amqp.TaskConsumer(self.connection,
                                     on_decode_error=self.on_decode_error)
         # QoS: Reset prefetch window.
@@ -725,8 +725,8 @@ class Consumer(object):
         # Callback called for each retry while the connection
         # can't be established.
         def _error_handler(exc, interval):
-            error("Consumer: Connection Error: %s. "
-                  "Trying again in %d seconds...", exc, interval)
+            error('Consumer: Connection Error: %s. '
+                  'Trying again in %d seconds...', exc, interval)
 
         # remember that the connection is lazy, it won't establish
         # until it's needed.
@@ -750,7 +750,7 @@ class Consumer(object):
         # Notifies other threads that this instance can't be used
         # anymore.
         self.close()
-        debug("Stopping consumers...")
+        debug('Stopping consumers...')
         self.stop_consumers(close_connection=False)
 
     def close(self):
@@ -774,8 +774,8 @@ class Consumer(object):
         conninfo = {}
         if self.connection:
             conninfo = self.connection.info()
-            conninfo.pop("password", None)  # don't send password.
-        return {"broker": conninfo, "prefetch_count": self.qos.value}
+            conninfo.pop('password', None)  # don't send password.
+        return {'broker': conninfo, 'prefetch_count': self.qos.value}
 
 
 class BlockingConsumer(Consumer):
@@ -785,7 +785,7 @@ class BlockingConsumer(Consumer):
         self.task_consumer.register_callback(self.receive_message)
         self.task_consumer.consume()
 
-        debug("Ready to accept tasks!")
+        debug('Ready to accept tasks!')
 
         while self._state != CLOSE and self.connection:
             self.maybe_shutdown()

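One detail worth calling out in the QoS hunk above: AMQP's basic.qos prefetch_count is a 16-bit field, so a value past the maximum cannot be expressed on the wire, and the code falls back to 0, which basic.qos treats as "no limit". A sketch of just that clamp (PREFETCH_COUNT_MAX = 0xFFFF is assumed here from the 16-bit field width)::

    PREFETCH_COUNT_MAX = 0xFFFF  # basic.qos prefetch_count is 16 bits wide

    def clamp_prefetch_count(pcount):
        # Mirrors QoS.set above: a value the protocol cannot carry
        # disables prefetch limiting outright instead of overflowing.
        if pcount > PREFETCH_COUNT_MAX:
            return 0  # 0 == unlimited in basic.qos
        return pcount
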
+ 63 - 63
celery/worker/control.py

@@ -20,7 +20,7 @@ from celery.utils.log import get_logger
 from . import state
 from .state import revoked
 
-TASK_INFO_FIELDS = ("exchange", "routing_key", "rate_limit")
+TASK_INFO_FIELDS = ('exchange', 'routing_key', 'rate_limit')
 logger = get_logger(__name__)
 
 
@@ -37,22 +37,22 @@ class Panel(UserDict):
 def revoke(panel, task_id, terminate=False, signal=None, **kwargs):
     """Revoke task by task id."""
     revoked.add(task_id)
-    action = "revoked"
+    action = 'revoked'
     if terminate:
-        signum = _signals.signum(signal or "TERM")
+        signum = _signals.signum(signal or 'TERM')
         for request in state.active_requests:
             if request.id == task_id:
-                action = "terminated (%s)" % (signum, )
+                action = 'terminated (%s)' % (signum, )
                 request.terminate(panel.consumer.pool, signal=signum)
                 break
 
-    logger.info("Task %s %s.", task_id, action)
-    return {"ok": "task %s %s" % (task_id, action)}
+    logger.info('Task %s %s.', task_id, action)
+    return {'ok': 'task %s %s' % (task_id, action)}
 
 
 @Panel.register
 def report(panel):
-    return {"ok": panel.app.bugreport()}
+    return {'ok': panel.app.bugreport()}
 
 
 @Panel.register
@@ -60,28 +60,28 @@ def enable_events(panel):
     dispatcher = panel.consumer.event_dispatcher
     if not dispatcher.enabled:
         dispatcher.enable()
-        dispatcher.send("worker-online")
-        logger.info("Events enabled by remote.")
-        return {"ok": "events enabled"}
-    return {"ok": "events already enabled"}
+        dispatcher.send('worker-online')
+        logger.info('Events enabled by remote.')
+        return {'ok': 'events enabled'}
+    return {'ok': 'events already enabled'}
 
 
 @Panel.register
 def disable_events(panel):
     dispatcher = panel.consumer.event_dispatcher
     if dispatcher.enabled:
-        dispatcher.send("worker-offline")
+        dispatcher.send('worker-offline')
         dispatcher.disable()
-        logger.info("Events disabled by remote.")
-        return {"ok": "events disabled"}
-    return {"ok": "events already disabled"}
+        logger.info('Events disabled by remote.')
+        return {'ok': 'events disabled'}
+    return {'ok': 'events already disabled'}
 
 
 @Panel.register
 def heartbeat(panel):
-    logger.debug("Heartbeat requested by remote.")
+    logger.debug('Heartbeat requested by remote.')
     dispatcher = panel.consumer.event_dispatcher
-    dispatcher.send("worker-heartbeat", freq=5, **state.SOFTWARE_INFO)
+    dispatcher.send('worker-heartbeat', freq=5, **state.SOFTWARE_INFO)
 
 
 @Panel.register
@@ -98,28 +98,28 @@ def rate_limit(panel, task_name, rate_limit, **kwargs):
     try:
         timeutils.rate(rate_limit)
     except ValueError, exc:
-        return {"error": "Invalid rate limit string: %s" % exc}
+        return {'error': 'Invalid rate limit string: %s' % exc}
 
     try:
         panel.app.tasks[task_name].rate_limit = rate_limit
     except KeyError:
-        logger.error("Rate limit attempt for unknown task %s",
+        logger.error('Rate limit attempt for unknown task %s',
                      task_name, exc_info=True)
-        return {"error": "unknown task"}
+        return {'error': 'unknown task'}
 
-    if not hasattr(panel.consumer.ready_queue, "refresh"):
-        logger.error("Rate limit attempt, but rate limits disabled.")
-        return {"error": "rate limits disabled"}
+    if not hasattr(panel.consumer.ready_queue, 'refresh'):
+        logger.error('Rate limit attempt, but rate limits disabled.')
+        return {'error': 'rate limits disabled'}
 
     panel.consumer.ready_queue.refresh()
 
     if not rate_limit:
-        logger.info("Rate limits disabled for tasks of type %s", task_name)
-        return {"ok": "rate limit disabled successfully"}
+        logger.info('Rate limits disabled for tasks of type %s', task_name)
+        return {'ok': 'rate limit disabled successfully'}
 
-    logger.info("New rate limit for tasks of type %s: %s.",
+    logger.info('New rate limit for tasks of type %s: %s.',
                 task_name, rate_limit)
-    return {"ok": "new rate limit set successfully"}
+    return {'ok': 'new rate limit set successfully'}
 
 
 @Panel.register
@@ -127,37 +127,37 @@ def time_limit(panel, task_name=None, hard=None, soft=None, **kwargs):
     try:
         task = panel.app.tasks[task_name]
     except KeyError:
-        logger.error("Change time limit attempt for unknown task %s",
+        logger.error('Change time limit attempt for unknown task %s',
                      task_name, exc_info=True)
-        return {"error": "unknown task"}
+        return {'error': 'unknown task'}
 
     task.soft_time_limit = soft
     task.time_limit = hard
 
-    logger.info("New time limits for tasks of type %s: soft=%s hard=%s",
+    logger.info('New time limits for tasks of type %s: soft=%s hard=%s',
                 task_name, soft, hard)
-    return {"ok": "time limits set successfully"}
+    return {'ok': 'time limits set successfully'}
 
 
 @Panel.register
 def dump_schedule(panel, safe=False, **kwargs):
     schedule = panel.consumer.timer.schedule
     if not schedule.queue:
-        logger.info("--Empty schedule--")
+        logger.info('--Empty schedule--')
         return []
 
-    formatitem = lambda (i, item): "%s. %s pri%s %r" % (i,
-            datetime.utcfromtimestamp(item["eta"]),
-            item["priority"],
-            item["item"])
+    formatitem = lambda (i, item): '%s. %s pri%s %r' % (i,
+            datetime.utcfromtimestamp(item['eta']),
+            item['priority'],
+            item['item'])
     info = map(formatitem, enumerate(schedule.info()))
-    logger.debug("* Dump of current schedule:\n%s", "\n".join(info))
+    logger.debug('* Dump of current schedule:\n%s', '\n'.join(info))
     scheduled_tasks = []
     for item in schedule.info():
-        scheduled_tasks.append({"eta": item["eta"],
-                                "priority": item["priority"],
-                                "request":
-                                    item["item"].args[0].info(safe=safe)})
+        scheduled_tasks.append({'eta': item['eta'],
+                                'priority': item['priority'],
+                                'request':
+                                    item['item'].args[0].info(safe=safe)})
     return scheduled_tasks
 
 
@@ -166,10 +166,10 @@ def dump_reserved(panel, safe=False, **kwargs):
     ready_queue = panel.consumer.ready_queue
     reserved = ready_queue.items
     if not reserved:
-        logger.info("--Empty queue--")
+        logger.info('--Empty queue--')
         return []
-    logger.debug("* Dump of currently reserved tasks:\n%s",
-                 "\n".join(map(safe_repr, reserved)))
+    logger.debug('* Dump of currently reserved tasks:\n%s',
+                 '\n'.join(map(safe_repr, reserved)))
     return [request.info(safe=safe)
             for request in reserved]
 
@@ -185,10 +185,10 @@ def stats(panel, **kwargs):
     asinfo = {}
     if panel.consumer.controller.autoscaler:
         asinfo = panel.consumer.controller.autoscaler.info()
-    return {"total": state.total_count,
-            "consumer": panel.consumer.info,
-            "pool": panel.consumer.pool.info,
-            "autoscaler": asinfo}
+    return {'total': state.total_count,
+            'consumer': panel.consumer.info,
+            'pool': panel.consumer.pool.info,
+            'autoscaler': asinfo}
 
 
 @Panel.register
@@ -204,21 +204,21 @@ def dump_tasks(panel, **kwargs):
         fields = dict((field, str(getattr(task, field, None)))
                         for field in TASK_INFO_FIELDS
                             if getattr(task, field, None) is not None)
-        info = map("=".join, fields.items())
+        info = map('='.join, fields.items())
         if not info:
             return task.name
-        return "%s [%s]" % (task.name, " ".join(info))
+        return '%s [%s]' % (task.name, ' '.join(info))
 
     info = map(_extract_info, (tasks[task]
                                     for task in sorted(tasks.keys())))
-    logger.debug("* Dump of currently registered tasks:\n%s", "\n".join(info))
+    logger.debug('* Dump of currently registered tasks:\n%s', '\n'.join(info))
 
     return info
 
 
 @Panel.register
 def ping(panel, **kwargs):
-    return "pong"
+    return 'pong'
 
 
 @Panel.register
@@ -227,7 +227,7 @@ def pool_grow(panel, n=1, **kwargs):
         panel.consumer.controller.autoscaler.force_scale_up(n)
     else:
         panel.consumer.pool.grow(n)
-    return {"ok": "spawned worker processes"}
+    return {'ok': 'spawned worker processes'}
 
 
 @Panel.register
@@ -236,13 +236,13 @@ def pool_shrink(panel, n=1, **kwargs):
         panel.consumer.controller.autoscaler.force_scale_down(n)
     else:
         panel.consumer.pool.shrink(n)
-    return {"ok": "terminated worker processes"}
+    return {'ok': 'terminated worker processes'}
 
 
 @Panel.register
 def pool_restart(panel, modules=None, reload=False, reloader=None, **kwargs):
     panel.consumer.controller.reload(modules, reload, reloader=reloader)
-    return {"ok": "reload started"}
+    return {'ok': 'reload started'}
 
 
 @Panel.register
@@ -250,12 +250,12 @@ def autoscale(panel, max=None, min=None):
     autoscaler = panel.consumer.controller.autoscaler
     if autoscaler:
         max_, min_ = autoscaler.update(max, min)
-        return {"ok": "autoscale now min=%r max=%r" % (max_, min_)}
-    raise ValueError("Autoscale not enabled")
+        return {'ok': 'autoscale now min=%r max=%r' % (max_, min_)}
+    raise ValueError('Autoscale not enabled')
 
 
 @Panel.register
-def shutdown(panel, msg="Got shutdown from remote", **kwargs):
+def shutdown(panel, msg='Got shutdown from remote', **kwargs):
     logger.warning(msg)
     raise SystemExit(msg)
 
@@ -266,7 +266,7 @@ def add_consumer(panel, queue, exchange=None, exchange_type=None,
     cset = panel.consumer.task_consumer
     exchange = queue if exchange is None else exchange
     routing_key = queue if routing_key is None else routing_key
-    exchange_type = "direct" if exchange_type is None else exchange_type
+    exchange_type = 'direct' if exchange_type is None else exchange_type
     if not cset.consuming_from(queue):
         q = panel.app.amqp.queues.add(queue,
                 exchange=exchange,
@@ -274,10 +274,10 @@ def add_consumer(panel, queue, exchange=None, exchange_type=None,
                 routing_key=routing_key, **options)
         cset.add_queue(q)
         cset.consume()
-        logger.info("Started consuming from %r", queue)
-        return {"ok": "started consuming from %r" % (queue, )}
+        logger.info('Started consuming from %r', queue)
+        return {'ok': 'started consuming from %r' % (queue, )}
     else:
-        return {"ok": "already consuming from %r" % (queue, )}
+        return {'ok': 'already consuming from %r' % (queue, )}
 
 
 @Panel.register
@@ -285,7 +285,7 @@ def cancel_consumer(panel, queue=None, **_):
     panel.app.amqp.queues.select_remove(queue)
     cset = panel.consumer.task_consumer
     cset.cancel_by_queue(queue)
-    return {"ok": "no longer consuming from %s" % (queue, )}
+    return {'ok': 'no longer consuming from %s' % (queue, )}
 
 
 @Panel.register

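The Panel commands above are the worker-side halves of a broadcast exchange; the client side drives them through the app's control API. A hedged sketch of invoking a few of them (method names as in celery.app.control of this era; the broker URL, task id and task names are placeholders)::

    from celery import Celery

    app = Celery(broker='amqp://guest@localhost//')

    # Each call below maps onto a @Panel.register handler shown above.
    app.control.rate_limit('feed.refresh', '10/m')       # -> rate_limit()
    app.control.revoke('some-task-id', terminate=True,
                       signal='TERM')                    # -> revoke()
    replies = app.control.broadcast('ping', reply=True)  # -> ping(), 'pong'
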
+ 3 - 3
celery/worker/heartbeat.py

@@ -38,13 +38,13 @@ class Heart(object):
 
     def start(self):
         if self.eventer.enabled:
-            self._send("worker-online")
+            self._send('worker-online')
             self.tref = self.timer.apply_interval(self.interval * 1000.0,
-                    self._send, ("worker-heartbeat", ))
+                    self._send, ('worker-heartbeat', ))
 
     def stop(self):
         if self.tref is not None:
             self.timer.cancel(self.tref)
             self.tref = None
         if self.eventer.enabled:
-            self._send("worker-offline")
+            self._send('worker-offline')

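For context on the heartbeat.py hunk: the Heart announces worker-online once, repeats worker-heartbeat on a fixed interval (apply_interval takes milliseconds, hence the * 1000.0), and sends worker-offline on stop. A self-contained sketch of that lifecycle with a plain thread (an illustration only, not the worker's actual timer; the 30-second default is assumed)::

    import threading

    def start_heart(send, interval=30.0):
        # Announce once, then beat until asked to stop.
        send('worker-online')
        stopped = threading.Event()

        def beat():
            while not stopped.wait(interval):  # wake every `interval` secs
                send('worker-heartbeat')
            send('worker-offline')

        t = threading.Thread(target=beat)
        t.daemon = True
        t.start()
        return stopped  # the caller runs stopped.set() to stop the heart
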
Some files were not shown because too many files changed in this diff