Ask Solem, 12 years ago
parent
commit
b23fd8c3f6
61 changed files with 358 additions and 324 deletions
  1. celery/__init__.py  +1 -1
  2. celery/app/amqp.py  +4 -7
  3. celery/app/routes.py  +1 -1
  4. celery/app/utils.py  +22 -22
  5. celery/apps/beat.py  +17 -17
  6. celery/backends/base.py  +3 -5
  7. celery/backends/cache.py  +4 -4
  8. celery/backends/database/models.py  +2 -2
  9. celery/backends/redis.py  +2 -2
  10. celery/beat.py  +6 -7
  11. celery/bin/camqadm.py  +10 -10
  12. celery/bin/celery.py  +42 -35
  13. celery/bin/celeryd.py  +1 -1
  14. celery/bin/celeryd_detach.py  +5 -5
  15. celery/bin/celeryd_multi.py  +26 -27
  16. celery/bin/celeryev.py  +2 -2
  17. celery/canvas.py  +5 -3
  18. celery/concurrency/base.py  +2 -2
  19. celery/contrib/batches.py  +2 -2
  20. celery/contrib/migrate.py  +11 -7
  21. celery/contrib/rdb.py  +25 -11
  22. celery/datastructures.py  +6 -5
  23. celery/events/cursesmon.py  +17 -13
  24. celery/exceptions.py  +6 -6
  25. celery/local.py  +4 -6
  26. celery/result.py  +6 -6
  27. celery/security/certificate.py  +6 -6
  28. celery/security/key.py  +2 -2
  29. celery/security/serialization.py  +2 -2
  30. celery/security/utils.py  +2 -2
  31. celery/tests/bin/test_celeryd_multi.py  +1 -1
  32. celery/tests/utilities/test_info.py  +2 -2
  33. celery/utils/debug.py  +3 -3
  34. celery/utils/dispatch/saferef.py  +6 -6
  35. celery/utils/dispatch/signal.py  +1 -1
  36. celery/utils/imports.py  +2 -1
  37. celery/utils/text.py  +2 -2
  38. celery/utils/threads.py  +2 -2
  39. celery/worker/consumer.py  +3 -2
  40. celery/worker/hub.py  +1 -1
  41. docs/configuration.rst  +1 -1
  42. docs/faq.rst  +1 -1
  43. docs/internals/deprecation.rst  +2 -2
  44. docs/tutorials/task-cookbook.rst  +3 -3
  45. docs/userguide/application.rst  +4 -4
  46. docs/userguide/calling.rst  +2 -2
  47. docs/userguide/canvas.rst  +5 -3
  48. docs/userguide/monitoring.rst  +4 -4
  49. docs/userguide/signals.rst  +3 -3
  50. docs/userguide/tasks.rst  +6 -6
  51. examples/celery_http_gateway/tasks.py  +1 -1
  52. examples/eventlet/tasks.py  +2 -2
  53. examples/eventlet/webcrawler.py  +1 -1
  54. examples/gevent/tasks.py  +3 -3
  55. examples/resultgraph/tasks.py  +4 -4
  56. extra/release/bump_version.py  +11 -11
  57. extra/release/verify_config_reference.py  +4 -2
  58. funtests/benchmarks/bench_worker.py  +9 -7
  59. funtests/suite/test_leak.py  +9 -6
  60. pavement.py  +14 -14
  61. setup.py  +2 -2
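
All of the hunks below apply the same mechanical conversion: old-style %
interpolation is replaced by str.format(), using explicit positional indexes
({0}), !r conversions, attribute access ({0.name}) and keyword arguments.
A minimal sketch of the equivalence the commit relies on (the Entry class is
a throwaway stand-in for illustration only, not part of the commit):

    name, count = 'images', 3

    # Old style, as removed throughout this commit.
    old = 'Purged %s messages from %r' % (count, name)

    # New style, as added: explicit indexes and conversions.
    new = 'Purged {0} messages from {1!r}'.format(count, name)
    assert old == new

    class Entry(object):
        def __init__(self, name):
            self.name = name

    # Replacement fields can reach into attributes, so no intermediate
    # dict of values has to be built first.
    assert '<Entry: {0.name}>'.format(Entry('cleanup')) == '<Entry: cleanup>'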

+ 1 - 1
celery/__init__.py

@@ -14,7 +14,7 @@ __author__ = 'Ask Solem'
 __contact__ = 'ask@celeryproject.org'
 __homepage__ = 'http://celeryproject.org'
 __docformat__ = 'restructuredtext'
-VERSION_BANNER = '%s (%s)' % (__version__, SERIES)
+VERSION_BANNER = '{0} ({1})'.format(__version__, SERIES)
 
 # -eof meta-
 

+ 4 - 7
celery/app/amqp.py

@@ -25,7 +25,8 @@ from . import routes as _routes
 
 #: Human readable queue declaration.
 QUEUE_FORMAT = """
-. %(name)s exchange:%(exchange)s(%(exchange_type)s) binding:%(routing_key)s
+. {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) \
+key={0.routing_key}
 """
 
 
@@ -102,12 +103,8 @@ class Queues(dict):
         active = self.consume_from
         if not active:
             return ''
-        info = [QUEUE_FORMAT.strip() % {
-                    'name': (name + ':').ljust(12),
-                    'exchange': q.exchange.name,
-                    'exchange_type': q.exchange.type,
-                    'routing_key': q.routing_key}
-                        for name, q in sorted(active.iteritems())]
+        info = [QUEUE_FORMAT.strip().format(q)
+                    for _, q in sorted(active.iteritems())]
         if indent_first:
             return textindent('\n'.join(info), indent)
         return info[0] + '\n' + textindent('\n'.join(info[1:]), indent)
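
The new QUEUE_FORMAT replaces the old manual (name + ':').ljust(12) call with
a format spec: {0.name:<16} left-aligns the attribute inside a 16-character
field. A small sketch, using namedtuples as hypothetical stand-ins for
kombu's Queue/Exchange objects:

    from collections import namedtuple

    # Illustration only; the real objects come from kombu.
    Exchange = namedtuple('Exchange', 'name type')
    Queue = namedtuple('Queue', 'name exchange routing_key')

    q = Queue('images', Exchange('media', 'direct'), 'images.resize')
    line = ('. {0.name:<16} exchange={0.exchange.name}({0.exchange.type}) '
            'key={0.routing_key}'.format(q))
    # 'images' is padded to 16 characters, then the exchange and key follow.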

+ 1 - 1
celery/app/routes.py

@@ -64,7 +64,7 @@ class Router(object):
             except KeyError:
                 if not self.create_missing:
                     raise QueueNotFound(
-                        'Queue %r is not defined in CELERY_QUEUES' % queue)
+                        'Queue {0!r} missing from CELERY_QUEUES'.format(queue))
                 for key in 'exchange', 'routing_key':
                     if route.get(key) is None:
                         route[key] = queue

+ 22 - 22
celery/app/utils.py

@@ -20,13 +20,13 @@ from .defaults import find
 
 #: Format used to generate bugreport information.
 BUGREPORT_INFO = """
-software -> celery:%(celery_v)s kombu:%(kombu_v)s py:%(py_v)s
-            billiard:%(billiard_v)s %(driver_v)s
-platform -> system:%(system)s arch:%(arch)s imp:%(py_i)s
-loader   -> %(loader)s
-settings -> transport:%(transport)s results:%(results)s
+software -> celery:{celery_v} kombu:{kombu_v} py:{py_v}
+            billiard:{billiard_v} {driver_v}
+platform -> system:{system} arch:{arch} imp:{py_i}
+loader   -> {loader}
+settings -> transport:{transport} results:{results}
 
-%(human_settings)s
+{human_settings}
 """
 
 
@@ -90,7 +90,7 @@ class Settings(datastructures.ConfigurationView):
     def humanize(self):
         """Returns a human readable string showing changes to the
         configuration."""
-        return '\n'.join('%s %s' % (key + ':', pretty(value, width=50))
+        return '\n'.join('{0}: {1}'.format(key, pretty(value, width=50))
                         for key, value in self.without_defaults().iteritems())
 
 
@@ -132,21 +132,21 @@ def bugreport(app):
 
     try:
         trans = app.connection().transport
-        driver_v = '%s:%s' % (trans.driver_name, trans.driver_version())
+        driver_v = '{0}:{1}'.format(trans.driver_name, trans.driver_version())
     except Exception:
         driver_v = ''
 
-    return BUGREPORT_INFO % {
-        'system': _platform.system(),
-        'arch': ', '.join(filter(None, _platform.architecture())),
-        'py_i': platforms.pyimplementation(),
-        'celery_v': celery.VERSION_BANNER,
-        'kombu_v': kombu.__version__,
-        'billiard_v': billiard.__version__,
-        'py_v': _platform.python_version(),
-        'driver_v': driver_v,
-        'transport': app.conf.BROKER_TRANSPORT or 'amqp',
-        'results': app.conf.CELERY_RESULT_BACKEND or 'disabled',
-        'human_settings': app.conf.humanize(),
-        'loader': qualname(app.loader.__class__),
-    }
+    return BUGREPORT_INFO.format(
+        system=_platform.system(),
+        arch=', '.join(filter(None, _platform.architecture())),
+        py_i=platforms.pyimplementation(),
+        celery_v=celery.VERSION_BANNER,
+        kombu_v=kombu.__version__,
+        billiard_v=billiard.__version__,
+        py_v=_platform.python_version(),
+        driver_v=driver_v,
+        transport=app.conf.BROKER_TRANSPORT or 'amqp',
+        results=app.conf.CELERY_RESULT_BACKEND or 'disabled',
+        human_settings=app.conf.humanize(),
+        loader=qualname(app.loader.__class__),
+    )

+ 17 - 17
celery/apps/beat.py

@@ -24,12 +24,12 @@ from celery.utils.timeutils import humanize_seconds
 
 STARTUP_INFO_FMT = """
 Configuration ->
-    . broker -> %(conninfo)s
-    . loader -> %(loader)s
-    . scheduler -> %(scheduler)s
-%(scheduler_info)s
-    . logfile -> %(logfile)s@%(loglevel)s
-    . maxinterval -> %(hmax_interval)s (%(max_interval)ss)
+    . broker -> {conninfo}
+    . loader -> {loader}
+    . scheduler -> {scheduler}
+{scheduler_info}
+    . logfile -> {logfile}@{loglevel}
+    . maxinterval -> {hmax_interval} ({max_interval}s)
 """.strip()
 
 logger = get_logger('celery.beat')
@@ -62,7 +62,7 @@ class Beat(configurated):
 
     def run(self):
         print(str(self.colored.cyan(
-                    'celerybeat v%s is starting.' % VERSION_BANNER)))
+                    'celerybeat v{0} is starting.'.format(VERSION_BANNER))))
         self.init_loader()
         self.set_process_title()
         self.start_scheduler()
@@ -108,16 +108,16 @@ class Beat(configurated):
 
     def startup_info(self, beat):
         scheduler = beat.get_scheduler(lazy=True)
-        return STARTUP_INFO_FMT % {
-            'conninfo': self.app.connection().as_uri(),
-            'logfile': self.logfile or '[stderr]',
-            'loglevel': LOG_LEVELS[self.loglevel],
-            'loader': qualname(self.app.loader),
-            'scheduler': qualname(scheduler),
-            'scheduler_info': scheduler.info,
-            'hmax_interval': humanize_seconds(beat.max_interval),
-            'max_interval': beat.max_interval,
-        }
+        return STARTUP_INFO_FMT.format(
+            conninfo=self.app.connection().as_uri(),
+            logfile=self.logfile or '[stderr]',
+            loglevel=LOG_LEVELS[self.loglevel],
+            loader=qualname(self.app.loader),
+            scheduler=qualname(scheduler),
+            scheduler_info=scheduler.info,
+            hmax_interval=humanize_seconds(beat.max_interval),
+            max_interval=beat.max_interval,
+            )
 
     def set_process_title(self):
         arg_start = 'manage' in sys.argv[0] and 2 or 1

+ 3 - 5
celery/backends/base.py

@@ -140,8 +140,7 @@ class BaseBackend(object):
         return result
 
     def forget(self, task_id):
-        raise NotImplementedError('%s does not implement forget.' % (
-                    self.__class__))
+        raise NotImplementedError('backend does not implement forget.')
 
     def wait_for(self, task_id, timeout=None, propagate=True, interval=0.5):
         """Wait for task and return its result.
@@ -259,8 +258,7 @@ class BaseDictBackend(BaseBackend):
         self._forget(task_id)
 
     def _forget(self, task_id):
-        raise NotImplementedError('%s does not implement forget.' % (
-                    self.__class__))
+        raise NotImplementedError('backend does not implement forget.')
 
     def get_status(self, task_id):
         """Get the status of a task."""
@@ -411,7 +409,7 @@ class KeyValueStoreBackend(BaseDictBackend):
             for key, value in r.iteritems():
                 yield bytes_to_str(key), value
             if timeout and iterations * interval >= timeout:
-                raise TimeoutError('Operation timed out (%s)' % (timeout, ))
+                raise TimeoutError('Operation timed out ({0})'.format(timeout))
             time.sleep(interval)  # don't busy loop.
             iterations += 1
 

+ 4 - 4
celery/backends/cache.py

@@ -91,9 +91,9 @@ class CacheBackend(KeyValueStoreBackend):
             self.Client = backends[self.backend]()
         except KeyError:
             raise ImproperlyConfigured(
-                    'Unknown cache backend: %s. Please use one of the '
-                    'following backends: %s' % (self.backend,
-                                                ', '.join(backends.keys())))
+                    'Unknown cache backend: {0}. Please use one of the '
+                    'following backends: {1}'.format(self.backend,
+                                        ', '.join(backends.keys())))
 
     def get(self, key):
         return self.client.get(key)
@@ -119,7 +119,7 @@ class CacheBackend(KeyValueStoreBackend):
 
     def __reduce__(self, args=(), kwargs={}):
         servers = ';'.join(self.servers)
-        backend = '%s://%s/' % (self.backend, servers)
+        backend = '{0}://{1}/'.format(self.backend, servers)
         kwargs.update(
             dict(backend=backend,
                  expires=self.expires,

+ 2 - 2
celery/backends/database/models.py

@@ -44,7 +44,7 @@ class Task(ResultModelBase):
                 'date_done': self.date_done}
 
     def __repr__(self):
-        return '<Task %s state: %s>' % (self.task_id, self.status)
+        return '<Task {0.task_id} state: {0.status}>'.format(self)
 
 
 class TaskSet(ResultModelBase):
@@ -69,4 +69,4 @@ class TaskSet(ResultModelBase):
                 'date_done': self.date_done}
 
     def __repr__(self):
-        return '<TaskSet: %s>' % (self.taskset_id, )
+        return '<TaskSet: {0.taskset_id}>'.format(self)

+ 2 - 2
celery/backends/redis.py

@@ -58,9 +58,9 @@ class RedisBackend(KeyValueStoreBackend):
 
         # For compatibility with the old REDIS_* configuration keys.
         def _get(key):
-            for prefix in 'CELERY_REDIS_%s', 'REDIS_%s':
+            for prefix in 'CELERY_REDIS_{0}', 'REDIS_{0}':
                 try:
-                    return conf[prefix % key]
+                    return conf[prefix.format(key)]
                 except KeyError:
                     pass
         if host and '://' in host:

+ 6 - 7
celery/beat.py

@@ -119,9 +119,8 @@ class ScheduleEntry(object):
         return vars(self).iteritems()
 
     def __repr__(self):
-        return ('<Entry: %s %s {%s}' % (self.name,
-                    reprcall(self.task, self.args or (), self.kwargs or {}),
-                    self.schedule))
+        return '<Entry: {0.name} {call} {0.schedule}'.format(self,
+            call=reprcall(self.task, self.args or (), self.kwargs or {}))
 
 
 class Scheduler(object):
@@ -225,8 +224,8 @@ class Scheduler(object):
                                         **entry.options)
         except Exception as exc:
             raise SchedulingError, SchedulingError(
-                "Couldn't apply scheduled task %s: %s" % (
-                    entry.name, exc)), sys.exc_info()[2]
+                "Couldn't apply scheduled task {0.name}: {exc}".format(
+                    entry, exc=exc)), sys.exc_info()[2]
         finally:
             if self.should_sync():
                 self._do_sync()
@@ -370,7 +369,7 @@ class PersistentScheduler(Scheduler):
 
     @property
     def info(self):
-        return '    . db -> %s' % (self.schedule_filename, )
+        return '    . db -> {self.schedule_filename}'.format(self=self)
 
 
 class Service(object):
@@ -473,7 +472,7 @@ def EmbeddedService(*args, **kwargs):
     """Return embedded clock service.
 
     :keyword thread: Run threaded instead of as a separate process.
-        Default is :const:`False`.
+        Uses :mod:`multiprocessing` by default, if available.
 
     """
     if kwargs.pop('thread', False) or _Process is None:

+ 10 - 10
celery/bin/camqadm.py

@@ -99,11 +99,11 @@ class Spec(object):
             return response
         if callable(self.returns):
             return self.returns(response)
-        return self.returns % (response, )
+        return self.returns.format(response)
 
     def format_arg(self, name, type, default_value=None):
         if default_value is not None:
-            return '%s:%s' % (name, default_value)
+            return '{0}:{1}'.format(name, default_value)
         return name
 
     def format_signature(self):
@@ -120,7 +120,7 @@ def dump_message(message):
 
 
 def format_declare_queue(ret):
-    return 'ok. queue:%s messages:%s consumers:%s.' % ret
+    return 'ok. queue:{0} messages:{1} consumers:{2}.'.format(*ret)
 
 
 class AMQShell(cmd.Cmd):
@@ -144,7 +144,7 @@ class AMQShell(cmd.Cmd):
     """
     conn = None
     chan = None
-    prompt_fmt = '%d> '
+    prompt_fmt = '{self.counter}> '
     identchars = cmd.IDENTCHARS = '.'
     needs_reconnect = False
     counter = 1
@@ -175,9 +175,9 @@ class AMQShell(cmd.Cmd):
         'queue.delete': Spec(('queue', str),
                              ('if_unused', bool, 'no'),
                              ('if_empty', bool, 'no'),
-                             returns='ok. %d messages deleted.'),
+                             returns='ok. {0} messages deleted.'),
         'queue.purge': Spec(('queue', str),
-                            returns='ok. %d messages deleted.'),
+                            returns='ok. {0} messages deleted.'),
         'basic.get': Spec(('queue', str),
                           ('no_ack', bool, 'off'),
                           returns=dump_message),
@@ -233,7 +233,7 @@ class AMQShell(cmd.Cmd):
 
     def display_command_help(self, cmd, short=False):
         spec = self.amqp[cmd]
-        self.say('%s %s' % (cmd, spec.format_signature()))
+        self.say('{0} {1}'.format(cmd, spec.format_signature()))
 
     def do_help(self, *args):
         if not args:
@@ -245,7 +245,7 @@ class AMQShell(cmd.Cmd):
             self.display_command_help(args[0])
 
     def default(self, line):
-        self.say("unknown syntax: '%s'. how about some 'help'?" % line)
+        self.say("unknown syntax: {0!r}. how about some 'help'?".format(line))
 
     def get_names(self):
         return set(self.builtins) | set(self.amqp)
@@ -325,7 +325,7 @@ class AMQShell(cmd.Cmd):
 
     @property
     def prompt(self):
-        return self.prompt_fmt % self.counter
+        return self.prompt_fmt.format(self=self)
 
 
 class AMQPAdmin(object):
@@ -342,7 +342,7 @@ class AMQPAdmin(object):
         if conn:
             conn.close()
         conn = self.app.connection()
-        self.note('-> connecting to %s.' % conn.as_uri())
+        self.note('-> connecting to {0}.'.format(conn.as_uri()))
         conn.connect()
         self.note('-> connected.')
         return conn

+ 42 - 35
celery/bin/celery.py

@@ -28,10 +28,15 @@ from celery.bin.base import Command as BaseCommand, Option
 HELP = """
 ---- -- - - ---- Commands- -------------- --- ------------
 
-%(commands)s
+{commands}
 ---- -- - - --------- -- - -------------- --- ------------
 
-Type '%(prog_name)s <command> --help' for help using a specific command.
+Type '{prog_name} <command> --help' for help using a specific command.
+"""
+
+MIGRATE_PROGRESS_FMT = """\
+Migrating task {state.count}/{state.strtotal}: \
+{body[task]}[{body[id]}]\
 """
 
 commands = {}
@@ -79,7 +84,8 @@ def load_extension_commands(namespace='celery.commands'):
         try:
             cls = symbol_by_name(sym)
         except (ImportError, SyntaxError) as exc:
-            warnings.warn('Cannot load extension %r: %r' % (sym, exc))
+            warnings.warn(
+                'Cannot load extension {0!r}: {1!r}'.format(sym, exc))
         else:
             command(cls, name=ep.name)
 
@@ -110,7 +116,7 @@ class Command(BaseCommand):
         try:
             ret = self.run(*args, **kwargs)
         except Error as exc:
-            self.error(self.colored.red('Error: %s' % exc))
+            self.error(self.colored.red('Error: {0!r}'.format(exc)))
             return exc.status
 
         return ret if ret is not None else EX_OK
@@ -138,13 +144,13 @@ class Command(BaseCommand):
         return self(*args, **options)
 
     def usage(self, command):
-        return '%%prog %s [options] %s' % (command, self.args)
+        return '%%prog {0} [options] {self.args}'.format(command, self=self)
 
     def prettify_list(self, n):
         c = self.colored
         if not n:
             return '- empty -'
-        return '\n'.join(str(c.reset(c.white('*'), ' %s' % (item, )))
+        return '\n'.join(str(c.reset(c.white('*'), ' {0}'.format(item)))
                             for item in n)
 
     def prettify_dict_ok_error(self, n):
@@ -314,8 +320,7 @@ class list_(Command):
         except NotImplementedError:
             raise Error('Your transport cannot list bindings.')
 
-        fmt = lambda q, e, r: self.out('%s %s %s' % (q.ljust(28),
-                                                     e.ljust(28), r))
+        fmt = lambda q, e, r: self.out('{0:<28} {1:<28} {2}'.format(q, e, r))
         fmt('Queue', 'Exchange', 'Routing Key')
         fmt('-' * 16, '-' * 16, '-' * 16)
         for b in bindings:
@@ -325,9 +330,9 @@ class list_(Command):
         topics = {'bindings': self.list_bindings}
         available = ', '.join(topics.keys())
         if not what:
-            raise Error('You must specify what to list (%s)' % available)
+            raise Error('You must specify one of {0}'.format(available))
         if what not in topics:
-            raise Error('unknown topic %r (choose one of: %s)' % (
+            raise Error('unknown topic {0!r} (choose one of: {1})'.format(
                             what, available))
         with self.app.connection() as conn:
             self.app.amqp.TaskConsumer(conn).declare()
@@ -397,16 +402,17 @@ class purge(Command):
     WARNING: There is no undo operation for this command.
 
     """
+    fmt_purged = "Purged {mnum} {messages} from {qnum} known task {queues}."
+    fmt_empty = "No messages purged from {qnum} {queues}"
+
     def run(self, *args, **kwargs):
         queues = len(self.app.amqp.queues.keys())
-        messages_removed = self.app.control.purge()
-        if messages_removed:
-            self.out('Purged %s %s from %s known task %s.' % (
-                messages_removed, text.pluralize(messages_removed, 'message'),
-                queues, text.pluralize(queues, 'queue')))
-        else:
-            self.out('No messages purged from %s known %s' % (
-                queues, text.pluralize(queues, 'queue')))
+        messages = self.app.control.purge()
+        fmt = self.fmt_purged if messages else self.fmt_empty
+        self.out(fmt.format(
+            mnum=messages, qnum=queues,
+            messages=text.pluralize(messages, 'message'),
+            queues=text.pluralize(queues, 'queue')))
 purge = command(purge)
 
 
@@ -464,8 +470,8 @@ class _RemoteControl(Command):
             # see if it uses args.
             meth = getattr(self, command)
             return text.join([
-                '|' + text.indent('%s%s %s' % (prefix, color(command),
-                                               meth.__doc__), indent), help,
+                '|' + text.indent('{0}{1} {2}'.format(prefix, color(command),
+                                                meth.__doc__), indent), help,
             ])
 
         except AttributeError:
@@ -488,7 +494,7 @@ class _RemoteControl(Command):
         ])
 
     def usage(self, command):
-        return '%%prog %s [options] %s <command> [arg1 .. argN]' % (
+        return '%%prog {0} [options] {1} <command> [arg1 .. argN]'.format(
                 command, self.args)
 
     def call(self, *args, **kwargs):
@@ -496,15 +502,15 @@ class _RemoteControl(Command):
 
     def run(self, *args, **kwargs):
         if not args:
-            raise Error('Missing %s method. See --help' % self.name)
+            raise Error('Missing {0.name} method. See --help'.format(self))
         return self.do_call_method(args, **kwargs)
 
     def do_call_method(self, args, **kwargs):
         method = args[0]
         if method == 'help':
-            raise Error("Did you mean '%s --help'?" % self.name)
+            raise Error("Did you mean '{0.name} --help'?".format(self))
         if method not in self.choices:
-            raise Error('Unknown %s method %s' % (self.name, method))
+            raise Error('Unknown {0.name} method {1}'.format(self, method))
 
         destination = kwargs.get('destination')
         timeout = kwargs.get('timeout') or self.choices[method][0]
@@ -647,8 +653,8 @@ class status(Command):
                         status=EX_UNAVAILABLE)
         nodecount = len(replies)
         if not kwargs.get('quiet', False):
-            self.out('\n%s %s online.' % (nodecount,
-                                          text.pluralize(nodecount, 'node')))
+            self.out('\n{0} {1} online.'.format(
+                nodecount, text.pluralize(nodecount, 'node')))
 status = command(status)
 
 
@@ -678,10 +684,10 @@ class migrate(Command):
             Option('--forever', '-F', action='store_true',
                     help='Continually migrate tasks until killed.'),
     )
+    progress_fmt = MIGRATE_PROGRESS_FMT
 
     def on_migrate_task(self, state, body, message):
-        self.out('Migrating task %s/%s: %s[%s]' % (
-            state.count, state.strtotal, body['task'], body['id']))
+        self.out(self.progress_fmt.format(state=state, body=body))
 
     def run(self, *args, **kwargs):
         if len(args) != 2:
@@ -812,12 +818,12 @@ class help(Command):
     """Show help screen and exit."""
 
     def usage(self, command):
-        return '%%prog <command> [options] %s' % (self.args, )
+        return '%%prog <command> [options] {0.args}'.format(self)
 
     def run(self, *args, **kwargs):
         self.parser.print_help()
-        self.out(HELP % {'prog_name': self.prog_name,
-                         'commands': CeleryCommand.list_commands()})
+        self.out(HELP.format(prog_name=self.prog_name,
+                             commands=CeleryCommand.list_commands()))
 
         return EX_USAGE
 help = command(help)
@@ -882,12 +888,13 @@ class CeleryCommand(BaseCommand):
     def get_command_info(self, command, indent=0, color=None):
         colored = term.colored().names[color] if color else lambda x: x
         obj = self.commands[command]
+        cmd = 'celery {0}'.format(colored(command))
         if obj.leaf:
-            return '|' + text.indent('celery %s' % colored(command), indent)
+            return '|' + text.indent(cmd, indent)
         return text.join([
             ' ',
-            '|' + text.indent('celery %s --help' % colored(command), indent),
-            obj.list_commands(indent, 'celery %s' % command, colored),
+            '|' + text.indent('{0} --help'.format(cmd), indent),
+            obj.list_commands(indent, 'celery {0}'.format(command), colored),
         ])
 
     @classmethod
@@ -896,7 +903,7 @@ class CeleryCommand(BaseCommand):
         ret = []
         for cls, commands, color in command_classes:
             ret.extend([
-                text.indent('+ %s: ' % white(cls), indent),
+                text.indent('+ {0}: '.format(white(cls)), indent),
                 '\n'.join(self.get_command_info(command, indent + 4, color)
                             for command in commands),
                 ''
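
MIGRATE_PROGRESS_FMT above also uses item access inside replacement fields:
{body[task]} looks up body['task'], with the key written unquoted. A minimal
sketch of both lookup styles, with made-up values:

    class State(object):
        count, strtotal = 3, '10'

    body = {'task': 'tasks.add', 'id': 'f59d'}
    FMT = 'Migrating task {state.count}/{state.strtotal}: {body[task]}[{body[id]}]'
    print(FMT.format(state=State(), body=body))
    # -> Migrating task 3/10: tasks.add[f59d]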

+ 1 - 1
celery/bin/celeryd.py

@@ -149,7 +149,7 @@ class WorkerCommand(Command):
             try:
                 kwargs['loglevel'] = mlevel(loglevel)
             except KeyError:  # pragma: no cover
-                self.die('Unknown level %r. Please use one of %s.' % (
+                self.die('Unknown level {0!r}. Please use one of {1}.'.format(
                     loglevel, '|'.join(l for l in LOG_LEVELS.keys()
                       if isinstance(l, basestring))))
         return self.app.Worker(**kwargs).run()

+ 5 - 5
celery/bin/celeryd_detach.py

@@ -72,9 +72,9 @@ class PartialOptionParser(OptionParser):
                 nargs = option.nargs
                 if len(rargs) < nargs:
                     if nargs == 1:
-                        self.error('%s option requires an argument' % opt)
+                        self.error('{0} requires an argument'.format(opt))
                     else:
-                        self.error('%s option requires %d arguments' % (
+                        self.error('{0} requires {1} arguments'.format(
                                     opt, nargs))
                 elif nargs == 1:
                     value = rargs.pop(0)
@@ -83,7 +83,7 @@ class PartialOptionParser(OptionParser):
                     del rargs[0:nargs]
 
             elif had_explicit_value:
-                self.error('%s option does not take a value' % opt)
+                self.error('{0} option does not take a value'.format(opt))
             else:
                 value = None
             option.process(opt, value, values, self)
@@ -121,9 +121,9 @@ class detached_celeryd(object):
         parser = self.Parser(prog_name)
         options, values = parser.parse_args(argv)
         if options.logfile:
-            parser.leftovers.append('--logfile=%s' % (options.logfile, ))
+            parser.leftovers.append('--logfile={0}'.format(options.logfile))
         if options.pidfile:
-            parser.leftovers.append('--pidfile=%s' % (options.pidfile, ))
+            parser.leftovers.append('--pidfile={0}'.format(options.pidfile))
         return options, values, parser.leftovers
 
     def execute_from_commandline(self, argv=None):

+ 26 - 27
celery/bin/celeryd_multi.py

@@ -113,16 +113,16 @@ SIGNAMES = set(sig for sig in dir(signal)
 SIGMAP = dict((getattr(signal, name), name) for name in SIGNAMES)
 
 USAGE = """\
-usage: %(prog_name)s start <node1 node2 nodeN|range> [celeryd options]
-       %(prog_name)s stop <n1 n2 nN|range> [-SIG (default: -TERM)]
-       %(prog_name)s restart <n1 n2 nN|range> [-SIG] [celeryd options]
-       %(prog_name)s kill <n1 n2 nN|range>
+usage: {prog_name} start <node1 node2 nodeN|range> [celeryd options]
+       {prog_name} stop <n1 n2 nN|range> [-SIG (default: -TERM)]
+       {prog_name} restart <n1 n2 nN|range> [-SIG] [celeryd options]
+       {prog_name} kill <n1 n2 nN|range>
 
-       %(prog_name)s show <n1 n2 nN|range> [celeryd options]
-       %(prog_name)s get hostname <n1 n2 nN|range> [-qv] [celeryd options]
-       %(prog_name)s names <n1 n2 nN|range>
-       %(prog_name)s expand template <n1 n2 nN|range>
-       %(prog_name)s help
+       {prog_name} show <n1 n2 nN|range> [celeryd options]
+       {prog_name} get hostname <n1 n2 nN|range> [-qv] [celeryd options]
+       {prog_name} names <n1 n2 nN|range>
+       {prog_name} expand template <n1 n2 nN|range>
+       {prog_name} help
 
 additional options (must appear after command name):
 
@@ -182,7 +182,7 @@ class MultiTool(object):
         try:
             self.commands[argv[0]](argv[1:], cmd)
         except KeyError:
-            self.error('Invalid command: %s' % argv[0])
+            self.error('Invalid command: {0}'.format(argv[0]))
 
         return self.retcode
 
@@ -215,7 +215,7 @@ class MultiTool(object):
         retcodes = []
         self.note('> Starting nodes...')
         for nodename, argv, _ in multi_args(p, cmd):
-            self.note('\t> %s: ' % (nodename, ), newline=False)
+            self.note('\t> {0}: '.format(nodename), newline=False)
             retcode = self.waitexec(argv)
             self.note(retcode and self.FAILED or self.OK)
             retcodes.append(retcode)
@@ -232,7 +232,7 @@ class MultiTool(object):
         except OSError as exc:
             if exc.errno != errno.ESRCH:
                 raise
-            self.note('Could not signal %s (%s): No such process' % (
+            self.note('Could not signal {0} ({1}): No such process'.format(
                         nodename, pid))
             return False
         return True
@@ -261,16 +261,15 @@ class MultiTool(object):
         for node in list(P):
             if node in P:
                 nodename, _, pid = node
-                self.note('\t> %s: %s -> %s' % (nodename,
-                                                SIGMAP[sig][3:],
-                                                pid))
+                self.note('\t> {0}: {1} -> {2}'.format(
+                    nodename, SIGMAP[sig][3:], pid))
                 if not self.signal_node(nodename, pid, sig):
                     on_down(node)
 
         def note_waiting():
             left = len(P)
             if left:
-                self.note(self.colored.blue('> Waiting for %s %s...' % (
+                self.note(self.colored.blue('> Waiting for {0} {1}...'.format(
                     left, pluralize(left, 'node'))), newline=False)
 
         if retry:
@@ -282,7 +281,7 @@ class MultiTool(object):
                     self.note('.', newline=False)
                     nodename, _, pid = node
                     if not self.node_alive(pid):
-                        self.note('\n\t> %s: %s' % (nodename, self.OK))
+                        self.note('\n\t> {0}: {1}'.format(nodename, self.OK))
                         on_down(node)
                         note_waiting()
                         break
@@ -304,7 +303,7 @@ class MultiTool(object):
             if pid:
                 nodes.append((nodename, tuple(argv), pid))
             else:
-                self.note('> %s: %s' % (nodename, self.DOWN))
+                self.note('> {0}: {1}'.format(nodename, self.DOWN))
                 if callback:
                     callback(nodename, argv, pid)
 
@@ -314,7 +313,7 @@ class MultiTool(object):
         self.splash()
         p = NamespacedOptionParser(argv)
         for nodename, _, pid in self.getpids(p, cmd):
-            self.note('Killing node %s (%s)' % (nodename, pid))
+            self.note('Killing node {0} ({1})'.format(nodename, pid))
             self.signal_node(nodename, pid, signal.SIGKILL)
 
     def stop(self, argv, cmd, retry=None, callback=None):
@@ -337,7 +336,7 @@ class MultiTool(object):
 
         def on_node_shutdown(nodename, argv, pid):
             self.note(self.colored.blue(
-                '> Restarting node %s: ' % nodename), newline=False)
+                '> Restarting node {0}: '.format(nodename)), newline=False)
             retval = self.waitexec(argv)
             self.note(retval and self.FAILED or self.OK)
             retvals.append(retval)
@@ -362,24 +361,24 @@ class MultiTool(object):
 
     def usage(self):
         self.splash()
-        self.say(USAGE % {'prog_name': self.prog_name})
+        self.say(USAGE.format(prog_name=self.prog_name))
 
     def splash(self):
         if not self.nosplash:
             c = self.colored
-            self.note(c.cyan('celeryd-multi v%s' % VERSION_BANNER))
+            self.note(c.cyan('celeryd-multi v{0}'.format(VERSION_BANNER)))
 
     def waitexec(self, argv, path=sys.executable):
         args = ' '.join([path] + list(argv))
         argstr = shellsplit(from_utf8(args))
         pipe = Popen(argstr, env=self.env)
-        self.info('  %s' % ' '.join(argstr))
+        self.info('  {0}'.format(' '.join(argstr)))
         retcode = pipe.wait()
         if retcode < 0:
-            self.note('* Child was terminated by signal %s' % (-retcode, ))
+            self.note('* Child was terminated by signal {0}'.format(-retcode))
             return -retcode
         elif retcode > 0:
-            self.note('* Child terminated with failure code %s' % (retcode, ))
+            self.note('* Child terminated with errorcode {0}'.format(retcode))
         return retcode
 
     def error(self, msg=None):
@@ -517,8 +516,8 @@ def format_opt(opt, value):
     if not value:
         return opt
     if opt.startswith('--'):
-        return '%s=%s' % (opt, value)
-    return '%s %s' % (opt, value)
+        return '{0}={1}'.format(opt, value)
+    return '{0} {1}'.format(opt, value)
 
 
 def parse_ns_range(ns, ranges=False):

+ 2 - 2
celery/bin/celeryev.py

@@ -99,8 +99,8 @@ class EvCommand(Command):
             return cam()
 
     def set_process_status(self, prog, info=''):
-        prog = '%s:%s' % (self.prog_name, prog)
-        info = '%s %s' % (info, strargv(sys.argv))
+        prog = '{0}:{1}'.format(self.prog_name, prog)
+        info = '{0} {1}'.format(info, strargv(sys.argv))
         return set_process_title(prog, info=info)
 
     def get_options(self):

+ 5 - 3
celery/canvas.py

@@ -244,7 +244,8 @@ class xmap(_basemap):
 
     def __repr__(self):
         task, it = self._unpack_args(self.kwargs)
-        return '[%s(x) for x in %s]' % (task.task, truncate(repr(it), 100))
+        return '[{0}(x) for x in {1}]'.format(task.task,
+                                              truncate(repr(it), 100))
 Signature.register_type(xmap)
 
 
@@ -253,7 +254,8 @@ class xstarmap(_basemap):
 
     def __repr__(self):
         task, it = self._unpack_args(self.kwargs)
-        return '[%s(*x) for x in %s]' % (task.task, truncate(repr(it), 100))
+        return '[{0}(*x) for x in {1}]'.format(task.task,
+                                               truncate(repr(it), 100))
 Signature.register_type(xstarmap)
 
 
@@ -370,7 +372,7 @@ class chord(Signature):
     def __repr__(self):
         if self.body:
             return self.body.reprcall(self.tasks)
-        return '<chord without body: %r>' % (self.tasks, )
+        return '<chord without body: {0.tasks!r}>'.format(self)
 
     @property
     def tasks(self):

+ 2 - 2
celery/concurrency/base.py

@@ -91,11 +91,11 @@ class BasePool(object):
 
     def terminate_job(self, pid):
         raise NotImplementedError(
-                '%s does not implement kill_job' % (self.__class__, ))
+                '{0} does not implement kill_job'.format(type(self)))
 
     def restart(self):
         raise NotImplementedError(
-                '%s does not implement restart' % (self.__class__, ))
+                '{0} does not implement restart'.format(type(self)))
 
     def stop(self):
         self.on_stop()

+ 2 - 2
celery/contrib/batches.py

@@ -21,7 +21,7 @@ A click counter that flushes the buffer every 100 messages, and every
         from collections import Counter
         count = Counter(request.kwargs['url'] for request in requests)
         for url, count in count.items():
-            print('>>> Clicks: %s -> %s' % (url, count))
+            print('>>> Clicks: {0} -> {1}'.format(url, count))
 
 Registering the click is done as follows:
 
@@ -139,7 +139,7 @@ class Batches(Task):
         self._logging = None
 
     def run(self, requests):
-        raise NotImplementedError('%r must implement run(requests)' % (self, ))
+        raise NotImplementedError('must implement run(requests)')
 
     def flush(self, requests):
         return self.apply_buffer(requests, ([SimpleRequest.from_request(r)

+ 11 - 7
celery/contrib/migrate.py

@@ -6,7 +6,7 @@
     Migration tools.
 
 """
-from __future__ import absolute_import
+from __future__ import absolute_import, print_function
 
 import socket
 
@@ -22,6 +22,12 @@ from celery.app import app_or_default
 from celery.utils import worker_direct
 
 
+MOVING_PROGRESS_FMT = """\
+Moving task {state.filtered}/{state.strtotal}: \
+{body[task]}[{body[id]}]\
+"""
+
+
 class StopFiltering(Exception):
     pass
 
@@ -39,8 +45,8 @@ class State(object):
 
     def __repr__(self):
         if self.filtered:
-            return '^%s' % self.filtered
-        return '%s/%s' % (self.count, self.strtotal)
+            return '^{0.filtered}'.format(self)
+        return '{0.count}/{0.strtotal}'.format(self)
 
 
 def republish(producer, message, exchange=None, routing_key=None,
@@ -349,7 +355,5 @@ move_direct_by_id = partial(move_task_by_id, transform=worker_direct)
 move_direct_by_idmap = partial(move_by_idmap, transform=worker_direct)
 move_direct_by_taskmap = partial(move_by_taskmap, transform=worker_direct)
 
-
-def filter_status(state, body, message):
-    print('Moving task %s/%s: %s[%s]' % (
-            state.filtered, state.strtotal, body['task'], body['id']))
+def filter_status(state, body, message, **kwargs):
+    print(MOVING_PROGRESS_FMT.format(state=state, body=body, **kwargs))

+ 25 - 11
celery/contrib/rdb.py

@@ -55,6 +55,23 @@ _current = [None]
 
 _frame = getattr(sys, '_getframe')
 
+NO_AVAILABLE_PORT = """\
+{self.ident}: Couldn't find an available port.
+
+Please specify one using the CELERY_RDB_PORT environment variable.
+"""
+
+BANNER = """\
+{self.ident}: Please telnet into {self.host} {self.port}.
+
+Type `exit` in session to continue.
+
+{self.ident}: Waiting for client...
+"""
+
+SESSION_STARTED = "{self.ident}: Now in session with {self.remote_addr}."
+SESSION_ENDED = "{self.ident}: Session with {self.remote_addr} ended."
+
 
 class Rdb(Pdb):
     me = 'Remote Debugger'
@@ -71,15 +88,14 @@ class Rdb(Pdb):
         self._sock, this_port = self.get_avail_port(host, port,
             port_search_limit, port_skew)
         self._sock.listen(1)
-        me = '%s:%s' % (self.me, this_port)
-        context = self.context = {'me': me, 'host': host, 'port': this_port}
-        self.say('%(me)s: Please telnet %(host)s %(port)s.'
-                 '  Type `exit` in session to continue.' % context)
-        self.say('%(me)s: Waiting for client...' % context)
+        self.ident = '{0}:{1}'.format(self.me, this_port)
+        self.host = host
+        self.port = this_port
+        self.say(BANNER.format(self=self))
 
         self._client, address = self._sock.accept()
-        context['remote_addr'] = ':'.join(map(str, address))
-        self.say('%(me)s: In session with %(remote_addr)s' % context)
+        self.remote_addr = ':'.join(map(str, address))
+        self.say(SESSION_STARTED.format(self=self))
         self._handle = sys.stdin = sys.stdout = self._client.makefile('rw')
         Pdb.__init__(self, completekey='tab',
                            stdin=self._handle, stdout=self._handle)
@@ -103,9 +119,7 @@ class Rdb(Pdb):
             else:
                 return _sock, this_port
         else:
-            raise Exception(
-                '%s: Could not find available port. Please set using '
-                'environment variable CELERY_RDB_PORT' % (self.me, ))
+            raise Exception(NO_AVAILABLE_PORT.format(self=self))
 
     def say(self, m):
         print(m, file=self.out)
@@ -116,7 +130,7 @@ class Rdb(Pdb):
         self._client.close()
         self._sock.close()
         self.active = False
-        self.say('%(me)s: Session %(remote_addr)s ended.' % self.context)
+        self.say(SESSION_ENDED.format(self=self))
 
     def do_continue(self, arg):
         self._close_session()
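
The rdb.py messages are now module-level templates rendered with
.format(self=self), so fields such as {self.ident} are resolved by attribute
lookup on the debugger instance. A tiny sketch of the pattern (the Debugger
class and the values are hypothetical):

    TEMPLATE = '{self.ident}: Please telnet into {self.host} {self.port}.'

    class Debugger(object):
        def __init__(self, host, port):
            self.host, self.port = host, port
            self.ident = 'Remote Debugger:{0}'.format(port)

    print(TEMPLATE.format(self=Debugger('127.0.0.1', 6899)))
    # -> Remote Debugger:6899: Please telnet into 127.0.0.1 6899.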

+ 6 - 5
celery/datastructures.py

@@ -192,11 +192,11 @@ class DependencyGraph(object):
     def __repr__(self):
         return '\n'.join(self.repr_node(N) for N in self)
 
-    def repr_node(self, obj, level=1):
-        output = ['%s(%s)' % (obj, self.valency_of(obj))]
+    def repr_node(self, obj, level=1, fmt='{0}({1})'):
+        output = [fmt.format(obj, self.valency_of(obj))]
         if obj in self:
             for other in self[obj]:
-                d = '%s(%s)' % (other, self.valency_of(other))
+                d = fmt.format(other, self.valency_of(other))
                 output.append('     ' * level + d)
                 output.extend(self.repr_node(other, level + 1).split('\n')[1:])
         return '\n'.join(output)
@@ -215,7 +215,8 @@ class AttributeDictMixin(object):
             return self[k]
         except KeyError:
             raise AttributeError(
-                "'%s' object has no attribute '%s'" % (type(self).__name__, k))
+                "{0!r} object has no attribute {1!r}".format(
+                    type(self).__name__, k))
 
     def __setattr__(self, key, value):
         """`d[key] = value -> d.key = value`"""
@@ -434,7 +435,7 @@ class LimitedSet(object):
         return iter(self._data)
 
     def __repr__(self):
-        return 'LimitedSet(%r)' % (self._data.keys(), )
+        return 'LimitedSet({0!r})'.format(self._data.keys())
 
     @property
     def chronologically(self):

+ 17 - 13
celery/events/cursesmon.py

@@ -34,6 +34,10 @@ MIN_TASK_WIDTH = 16
 # this module is considered experimental
 # we don't care about coverage.
 
+STATUS_SCREEN = """\
+events: {s.event_count} tasks:{s.task_count} workers:{w_alive}/{w_all}
+"""
+
 
 class CursesMonitor(object):  # pragma: no cover
     keymap = {}
@@ -48,7 +52,7 @@ class CursesMonitor(object):  # pragma: no cover
     online_str = 'Workers online: '
     help_title = 'Keys: '
     help = ('j:up k:down i:info t:traceback r:result c:revoke ^c: quit')
-    greet = 'celeryev %s' % VERSION_BANNER
+    greet = 'celeryev {0}'.format(VERSION_BANNER)
     info_str = 'Info: '
 
     def __init__(self, state, keymap=None, app=None):
@@ -86,7 +90,8 @@ class CursesMonitor(object):  # pragma: no cover
         state = abbr(state, STATE_WIDTH).ljust(STATE_WIDTH)
         timestamp = timestamp.ljust(TIMESTAMP_WIDTH)
 
-        row = '%s %s %s %s %s ' % (uuid, worker, task, timestamp, state)
+        row = '{0} {1} {2} {3} {4} '.format(uuid, worker, task,
+                                            timestamp, state)
         if self.screen_width is None:
             self.screen_width = len(row[:mx])
         return row[:mx]
@@ -200,7 +205,7 @@ class CursesMonitor(object):  # pragma: no cover
                 curline = y()
 
                 host, response = subreply.items()[0]
-                host = '%s: ' % host
+                host = '{0}: '.format(host)
                 self.win.addstr(curline, 3, host, curses.A_BOLD)
                 attr = curses.A_NORMAL
                 text = ''
@@ -274,7 +279,7 @@ class CursesMonitor(object):  # pragma: no cover
                                 curses.A_NORMAL)
 
         return self.alert(alert_callback,
-                'Task details for %s' % self.selected_task)
+                'Task details for {0.selected_task}'.format(self))
 
     def selection_traceback(self):
         if not self.selected_task:
@@ -289,7 +294,7 @@ class CursesMonitor(object):  # pragma: no cover
                 self.win.addstr(y(), 3, line)
 
         return self.alert(alert_callback,
-                'Task Exception Traceback for %s' % self.selected_task)
+                'Task Exception Traceback for {0.selected_task}'.format(self))
 
     def selection_result(self):
         if not self.selected_task:
@@ -304,7 +309,7 @@ class CursesMonitor(object):  # pragma: no cover
                 self.win.addstr(y(), 3, line)
 
         return self.alert(alert_callback,
-                'Task Result for %s' % self.selected_task)
+                'Task Result for {0.selected_task}'.format(self))
 
     def display_task_row(self, lineno, task):
         state_color = self.state_colors.get(task.state)
@@ -365,10 +370,10 @@ class CursesMonitor(object):  # pragma: no cover
             else:
                 info = selection.info()
                 if 'runtime' in info:
-                    info['runtime'] = '%.2fs' % info['runtime']
+                    info['runtime'] = '{0:.2f}s'.format(info['runtime'])
                 if 'result' in info:
                     info['result'] = abbr(info['result'], 16)
-                info = ' '.join('%s=%s' % (key, value)
+                info = ' '.join('{0}={1}'.format(key, value)
                             for key, value in info.items())
                 detail = '... -> key i'
             infowin = abbr(info,
@@ -394,11 +399,10 @@ class CursesMonitor(object):  # pragma: no cover
         # Info
         win.addstr(my - 3, x, self.info_str, curses.A_BOLD)
         win.addstr(my - 3, x + len(self.info_str),
-                'events:%s tasks:%s workers:%s/%s' % (
-                    self.state.event_count, self.state.task_count,
-                    len([w for w in self.state.workers.values()
-                            if w.alive]),
-                    len(self.state.workers)),
+                STATUS_SCREEN.format(s=self.state,
+                    w_alive=len([w for w in self.state.workers.values()
+                                    if w.alive]),
+                    w_all=len(self.state.workers)),
                 curses.A_DIM)
 
         # Help

+ 6 - 6
celery/exceptions.py

@@ -13,7 +13,7 @@ from billiard.exceptions import (  # noqa
 )
 
 UNREGISTERED_FMT = """\
-Task of kind %s is not registered, please make sure it's imported.\
+Task of kind {0} is not registered, please make sure it's imported.\
 """
 
 
@@ -41,7 +41,7 @@ class NotRegistered(KeyError):
     """The task is not registered."""
 
     def __repr__(self):
-        return UNREGISTERED_FMT % str(self)
+        return UNREGISTERED_FMT.format(self)
 
 
 class AlreadyRegistered(Exception):
@@ -71,15 +71,15 @@ class RetryTaskError(Exception):
 
     def humanize(self):
         if isinstance(self.when, int):
-            return 'in %ss' % self.when
-        return 'at %s' % (self.when, )
+            return 'in {0.when}s'.format(self)
+        return 'at {0.when}'.format(self)
 
     def __str__(self):
         if self.message:
             return self.message
         if self.excs:
-            return 'Retry %s: %r' % (self.humanize(), self.excs)
-        return 'Retry %s' % self.humanize()
+            return 'Retry {0}: {1!r}'.format(self.humanize(), self.excs)
+        return 'Retry {0}'.format(self.humanize())
 
     def __reduce__(self):
         return self.__class__, (self.message, self.excs, self.when)

+ 4 - 6
celery/local.py

@@ -82,7 +82,7 @@ class Proxy(object):
         try:
             return getattr(self.__local, self.__name__)
         except AttributeError:
-            raise RuntimeError('no object bound to %s' % self.__name__)
+            raise RuntimeError('no object bound to {0.__name__}'.format(self))
 
     @property
     def __dict__(self):
@@ -95,7 +95,7 @@ class Proxy(object):
         try:
             obj = self._get_current_object()
         except RuntimeError:  # pragma: no cover
-            return '<%s unbound>' % self.__class__.__name__
+            return '<{0} unbound>'.format(self.__class__.__name__)
         return repr(obj)
 
     def __nonzero__(self):
@@ -425,7 +425,5 @@ class LocalManager(object):
             release_local(local)
 
     def __repr__(self):
-        return '<%s storages: %d>' % (
-            self.__class__.__name__,
-            len(self.locals)
-        )
+        return '<{0} storages: {1}>'.format(
+            self.__class__.__name__, len(self.locals))

+ 6 - 6
celery/result.py

@@ -197,7 +197,7 @@ class AsyncResult(ResultBase):
         return hash(self.id)
 
     def __repr__(self):
-        return '<%s: %s>' % (self.__class__.__name__, self.id)
+        return '<{0}: {1}>'.format(type(self).__name__, self.id)
 
     def __eq__(self, other):
         if isinstance(other, AsyncResult):
@@ -534,8 +534,8 @@ class ResultSet(ResultBase):
         return NotImplemented
 
     def __repr__(self):
-        return '<%s: [%s]>' % (self.__class__.__name__,
-                               ', '.join(r.id for r in self.results))
+        return '<{0}: [{1}]>'.format(type(self).__name__,
+                                     ', '.join(r.id for r in self.results))
 
     @property
     def subtasks(self):
@@ -598,8 +598,8 @@ class GroupResult(ResultSet):
         return NotImplemented
 
     def __repr__(self):
-        return '<%s: %s [%s]>' % (self.__class__.__name__, self.id,
-                                  ', '.join(r.id for r in self.results))
+        return '<{0}: {1} [{2}]>'.format(type(self).__name__, self.id,
+                                         ', '.join(r.id for r in self.results))
 
     def serializable(self):
         return self.id, [r.serializable() for r in self.results]
@@ -675,7 +675,7 @@ class EagerResult(AsyncResult):
         self._state = states.REVOKED
 
     def __repr__(self):
-        return "<EagerResult: %s>" % self.id
+        return "<EagerResult: {0.id}>".format(self)
 
     @property
     def result(self):

+ 6 - 6
celery/security/certificate.py

@@ -21,7 +21,7 @@ class Certificate(object):
 
     def __init__(self, cert):
         assert crypto is not None
-        with reraise_errors('Invalid certificate: %r'):
+        with reraise_errors('Invalid certificate: {0!r}'):
             self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
 
     def has_expired(self):
@@ -39,11 +39,11 @@ class Certificate(object):
 
     def get_id(self):
         """Serial number/issuer pair uniquely identifies a certificate"""
-        return '%s %s' % (self.get_issuer(), self.get_serial_number())
+        return '{0} {1}'.format(self.get_issuer(), self.get_serial_number())
 
     def verify(self, data, signature, digest):
         """Verifies the signature for string containing data."""
-        with reraise_errors('Bad signature: %r'):
+        with reraise_errors('Bad signature: {0!r}'):
             crypto.verify(self._cert, signature, data, digest)
 
 
@@ -63,11 +63,11 @@ class CertStore(object):
         try:
             return self._certs[id]
         except KeyError:
-            raise SecurityError('Unknown certificate: %r' % (id, ))
+            raise SecurityError('Unknown certificate: {0!r}'.format(id))
 
     def add_cert(self, cert):
         if cert.get_id() in self._certs:
-            raise SecurityError('Duplicate certificate: %r' % (id, ))
+            raise SecurityError('Duplicate certificate: {0!r}'.format(id))
         self._certs[cert.get_id()] = cert
 
 
@@ -83,5 +83,5 @@ class FSCertStore(CertStore):
                 cert = Certificate(f.read())
                 if cert.has_expired():
                     raise SecurityError(
-                        'Expired certificate: %r' % (cert.get_id(), ))
+                        'Expired certificate: {0!r}'.format(cert.get_id()))
                 self.add_cert(cert)

+ 2 - 2
celery/security/key.py

@@ -14,10 +14,10 @@ from .utils import crypto, reraise_errors
 class PrivateKey(object):
 
     def __init__(self, key):
-        with reraise_errors('Invalid private key: %r'):
+        with reraise_errors('Invalid private key: {0!r}'):
             self._key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
 
     def sign(self, data, digest):
         """sign string containing data."""
-        with reraise_errors('Unable to sign data: %r'):
+        with reraise_errors('Unable to sign data: {0!r}'):
             return crypto.sign(self._key, data, digest)

+ 2 - 2
celery/security/serialization.py

@@ -40,7 +40,7 @@ class SecureSerializer(object):
         """serialize data structure into string"""
         assert self._key is not None
         assert self._cert is not None
-        with reraise_errors('Unable to serialize: %r', (Exception, )):
+        with reraise_errors('Unable to serialize: {0!r}', (Exception, )):
             content_type, content_encoding, body = encode(
                     data, serializer=self._serializer)
             # What we sign is the serialized body, not the body itself.
@@ -54,7 +54,7 @@ class SecureSerializer(object):
     def deserialize(self, data):
         """deserialize data structure from string"""
         assert self._cert_store is not None
-        with reraise_errors('Unable to deserialize: %r', (Exception, )):
+        with reraise_errors('Unable to deserialize: {0!r}', (Exception, )):
             payload = self._unpack(data)
             signature, signer, body = (payload['signature'],
                                        payload['signer'],

+ 2 - 2
celery/security/utils.py

@@ -21,10 +21,10 @@ except ImportError:  # pragma: no cover
 
 
 @contextmanager
-def reraise_errors(msg='%r', errors=None):
+def reraise_errors(msg='{0!r}', errors=None):
     assert crypto is not None
     errors = (crypto.Error, ) if errors is None else errors
     try:
         yield
     except errors as exc:
-        raise SecurityError, SecurityError(msg % (exc, )), sys.exc_info()[2]
+        raise SecurityError, SecurityError(msg.format(exc)), sys.exc_info()[2]

+ 1 - 1
celery/tests/bin/test_celeryd_multi.py

@@ -187,7 +187,7 @@ class test_MultiTool(Case):
         pipe.wait.return_value = 2
         self.assertEqual(self.t.waitexec(['-m', 'foo'], 'path'), 2)
         self.t.note.assert_called_with(
-                '* Child terminated with failure code 2')
+                '* Child terminated with errorcode 2')
 
         pipe.wait.return_value = 0
         self.assertFalse(self.t.waitexec(['-m', 'foo', 'path']))

+ 2 - 2
celery/tests/utilities/test_info.py

@@ -28,8 +28,8 @@ QUEUES = {'queue1': {
             'routing_key': 'bind2'}}
 
 
-QUEUE_FORMAT1 = """. queue1:      exchange:exchange1(type1) binding:bind1"""
-QUEUE_FORMAT2 = """. queue2:      exchange:exchange2(type2) binding:bind2"""
+QUEUE_FORMAT1 = '. queue1           exchange=exchange1(type1) key=bind1'
+QUEUE_FORMAT2 = '. queue2           exchange=exchange2(type2) key=bind2'
 
 
 class test_Info(Case):

+ 3 - 3
celery/utils/debug.py

@@ -44,11 +44,11 @@ def memdump(samples=10):
     if filter(None, _mem_sample):
         print('- rss (sample):')
         for mem in sample(_mem_sample, samples):
-            print('-    > %s,' % mem)
+            print('-    > {0},'.format(mem))
         _mem_sample[:] = []
     import gc
     gc.collect()
-    print('- rss (end): %s.' % (mem_rss()))
+    print('- rss (end): {0}.'.format(mem_rss()))
 
 
 def sample(x, n, k=0):
@@ -70,7 +70,7 @@ def mem_rss():
     """Returns RSS memory usage as a humanized string."""
     p = ps()
     if p is not None:
-        return '%sMB' % (format_d(p.get_memory_info().rss // 1024), )
+        return '{0}MB'.format(format_d(p.get_memory_info().rss // 1024))
 
 
 def ps():

+ 6 - 6
celery/utils/dispatch/saferef.py

@@ -27,8 +27,8 @@ def safe_ref(target, on_delete=None):  # pragma: no cover
         # Turn a bound method into a BoundMethodWeakref instance.
         # Keep track of these instances for lookup by disconnect().
         assert hasattr(target, 'im_func'), \
-            """safe_ref target %r has im_self, but no im_func, " \
-            "don't know how to create reference""" % (target, )
+            """safe_ref target {0!r} has im_self, but no im_func, " \
+            "don't know how to create reference""".format(target)
         return get_bound_method_weakref(target=target,
                                         on_delete=on_delete)
     if callable(on_delete):
@@ -142,8 +142,8 @@ class BoundMethodWeakref(object):  # pragma: no cover
                     try:
                         traceback.print_exc()
                     except AttributeError:
-                        print("Exception during saferef %s cleanup function "
-                              "%s: %s" % (self, function, exc))
+                        print("Exception during saferef {0} cleanup function "
+                              "{1}: {2}".format(self, function, exc))
 
         self.deletion_methods = [on_delete]
         self.key = self.calculate_key(target)
@@ -163,8 +163,8 @@ class BoundMethodWeakref(object):  # pragma: no cover
 
     def __str__(self):
         """Give a friendly representation of the object"""
-        return """%s( %s.%s )""" % (
-            self.__class__.__name__,
+        return """{0}( {1}.{2} )""".format(
+            type(self).__name__,
             self.self_name,
             self.func_name,
         )

+ 1 - 1
celery/utils/dispatch/signal.py

@@ -216,6 +216,6 @@ class Signal(object):  # pragma: no cover
                     del self.receivers[idx]
 
     def __repr__(self):
-        return '<Signal: %s>' % (self.__class__.__name__, )
+        return '<Signal: {0}>'.format(type(self).__name__)
 
     __str__ = __repr__

+ 2 - 1
celery/utils/imports.py

@@ -87,7 +87,8 @@ def symbol_by_name(name, aliases={}, imp=None, package=None,
             module = imp(module_name, package=package, **kwargs)
         except ValueError as exc:
             raise ValueError, ValueError(
-                    "Couldn't import %r: %s" % (name, exc)), sys.exc_info()[2]
+                    "Couldn't import {0!r}: {1}".format(
+                        name, exc)), sys.exc_info()[2]
         return getattr(module, cls_name) if cls_name else module
     except (ImportError, AttributeError):
         if default is None:

+ 2 - 2
celery/utils/text.py

@@ -73,8 +73,8 @@ def pluralize(n, text, suffix='s'):
 
 def pretty(value, width=80, nl_width=80, **kw):
     if isinstance(value, dict):
-        return '{\n %s' % (pformat(value, 4, nl_width)[1:])
+        return '{{\n {0}'.format(pformat(value, 4, nl_width)[1:])
     elif isinstance(value, tuple):
-        return '\n%s%s' % (' ' * 4, pformat(value, width=nl_width, **kw))
+        return '\n{0}{1}'.format(' ' * 4, pformat(value, width=nl_width, **kw))
     else:
         return pformat(value, width=width, **kw)

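The doubled brace in the dict branch is needed because str.format treats `{` and `}` as field delimiters; `{{` and `}}` are the escapes for literal braces. A quick sketch of the rule (values invented):

    # '{{' renders as a literal '{', while '{0}' is a replacement field
    assert '{{\n {0}'.format('x: 1') == '{\n x: 1'
    # a lone, unescaped '{' in the template raises ValueError at format time
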
+ 2 - 2
celery/utils/threads.py

@@ -33,7 +33,7 @@ class bgThread(threading.Thread):
         raise NotImplementedError('subclass responsibility')
 
     def on_crash(self, msg, *fmt, **kwargs):
-        print(msg % fmt, file=sys.stderr)
+        print(msg.format(*fmt), file=sys.stderr)
         exc_info = sys.exc_info()
         try:
             traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
@@ -50,7 +50,7 @@ class bgThread(threading.Thread):
                     body()
                 except Exception as exc:
                     try:
-                        self.on_crash('%r crashed: %r', self.name, exc)
+                        self.on_crash('{0!r} crashed: {1!r}', self.name, exc)
                         self._set_stopped()
                     finally:
                         os._exit(1)  # exiting by normal means won't work

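`on_crash` receives the format-style template and its arguments separately and only interpolates at print time, mirroring lazy logging. A minimal standalone sketch of that calling convention (names invented):

    from __future__ import print_function
    import sys

    def on_crash(msg, *fmt):
        # substitution is deferred until the handler actually runs
        print(msg.format(*fmt), file=sys.stderr)

    on_crash('{0!r} crashed: {1!r}', 'worker-1', RuntimeError('boom'))
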
+ 3 - 2
celery/worker/consumer.py

@@ -150,11 +150,12 @@ info, warn, error, crit = (logger.info, logger.warn,
 
 
 def debug(msg, *args, **kwargs):
-    logger.debug('Consumer: %s' % (msg, ), *args, **kwargs)
+    logger.debug('Consumer: {0}'.format(msg), *args, **kwargs)
 
 
 def dump_body(m, body):
-    return "%s (%sb)" % (text.truncate(safe_repr(body), 1024), len(m.body))
+    return "{0} ({1}b)".format(text.truncate(safe_repr(body), 1024),
+                               len(m.body))
 
 
 class Component(StartStopComponent):

+ 1 - 1
celery/worker/hub.py

@@ -29,7 +29,7 @@ class BoundedSemaphore(object):
         >>> x = BoundedSemaphore(2)
 
         >>> def callback(i):
-        ...     print('HELLO %r' % i)
+        ...     print('HELLO {0!r}'.format(i))
 
         >>> x.acquire(callback, 1)
         HELLO 1

+ 1 - 1
docs/configuration.rst

@@ -119,7 +119,7 @@ You can change methods too, for example the ``on_failure`` handler:
 .. code-block:: python
 
     def my_on_failure(self, exc, task_id, args, kwargs, einfo):
-        print("Oh no! Task failed: %r" % (exc, ))
+        print("Oh no! Task failed: {0!r}".format(exc))
 
     CELERY_ANNOTATIONS = {"*": {"on_failure": my_on_failure}}
 

+ 1 - 1
docs/faq.rst

@@ -671,7 +671,7 @@ Also, a common pattern is to add callbacks to tasks:
 
     @celery.task(ignore_result=True)
     def log_result(result):
-        logger.info("log_result got: %r" % (result, ))
+        logger.info("log_result got: %r", result)
 
 Invocation::
 

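Note that the logger call keeps the `%r` placeholder and passes the value as a separate argument: the logging framework only interpolates when the record is actually emitted. A small sketch contrasting the two styles (the value is a stand-in):

    import logging

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger(__name__)
    result = 42  # stand-in value, for illustration only

    # eager: the message string is built even though INFO is filtered out here
    logger.info('log_result got: %r' % (result, ))

    # lazy: interpolation is left to the logging framework
    logger.info('log_result got: %r', result)
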
+ 2 - 2
docs/internals/deprecation.rst

@@ -100,13 +100,13 @@ for example::
     def add(x, y, task_id=None):
         print("My task id is %r" % (task_id, ))
 
-must be rewritten into::
+should be rewritten into::
 
     from celery import task
 
     @task()
     def add(x, y):
-        print("My task id is %r" % (add.request.id, ))
+        print("My task id is {0.request.id}".format(add))
 
 
 Task attributes

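For anyone unfamiliar with the `{0.request.id}` form: replacement fields can traverse attributes of the positional argument, equivalent to chained getattr() calls. A self-contained sketch with invented stand-in classes:

    class Request(object):
        id = 'd8e0b4f2'  # hypothetical task id

    class Task(object):
        request = Request()

    add = Task()
    # '{0.request.id}' looks up add.request.id before substituting it
    print('My task id is {0.request.id}'.format(add))
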
+ 3 - 3
docs/tutorials/task-cookbook.rst

@@ -43,7 +43,7 @@ The cache key expires after some time in case something unexpected happens
         # The cache key consists of the task name and the MD5 digest
         # of the feed URL.
         feed_url_digest = md5(feed_url).hexdigest()
-        lock_id = '%s-lock-%s' % (self.name, feed_url_hexdigest)
+        lock_id = '{0}-lock-{1}'.format(self.name, feed_url_digest)
 
         # cache.add fails if the key already exists
         acquire_lock = lambda: cache.add(lock_id, 'true', LOCK_EXPIRE)
@@ -51,7 +51,7 @@ The cache key expires after some time in case something unexpected happens
         # advantage of using add() for atomic locking
         release_lock = lambda: cache.delete(lock_id)
 
-        logger.debug('Importing feed: %s' % feed_url)
+        logger.debug('Importing feed: %s', feed_url)
         if acquire_lock():
             try:
                 feed = Feed.objects.import_feed(feed_url)
@@ -60,4 +60,4 @@ The cache key expires after some time in case something unexpected happens
             return feed.url
 
         logger.debug(
-            'Feed %s is already being imported by another worker' % feed_url)
+            'Feed %s is already being imported by another worker', feed_url)

+ 4 - 4
docs/userguide/application.rst

@@ -379,7 +379,7 @@ chain breaks::
     .. code-block:: python
 
         def hello(to):
-            return 'hello %s' % to
+            return 'hello {0}'.format(to)
 
         >>> from celery.execute import apply_async
 
@@ -397,7 +397,7 @@ chain breaks::
             send_error_emails = True
 
             def run(self, to):
-                return 'hello %s' % to
+                return 'hello {0}'.format(to)
         tasks.register(Hello)
 
         >>> Hello.delay('world!')
@@ -413,7 +413,7 @@ chain breaks::
 
         @task(send_error_emails=True)
         def hello(x):
-            return 'hello %s' % to
+            return 'hello {0}'.format(x)
 
 Abstract Tasks
 ==============
@@ -440,7 +440,7 @@ class: :class:`celery.Task`.
         abstract = True
 
         def __call__(self, *args, **kwargs):
-            print('TASK STARTING: %s[%s]' % (self.name, self.request.id))
+            print('TASK STARTING: {0.name}[{0.request.id}]'.format(self))
             return self.run(*args, **kwargs)
 
 

+ 2 - 2
docs/userguide/calling.rst

@@ -148,8 +148,8 @@ This is an example error callback:
     def error_handler(uuid):
         result = AsyncResult(uuid)
         exc = result.get(propagate=False)
-        print('Task %r raised exception: %r\n%r' % (
-              exc, result.traceback))
+        print('Task {0} raised exception: {1!r}\n{2!r}'.format(
+              uuid, exc, result.traceback))
 
 it can be added to the task using the ``link_error`` execution
 option:

+ 5 - 3
docs/userguide/canvas.rst

@@ -457,15 +457,17 @@ the error callbacks take the id of the parent task as argument instead:
 
 .. code-block:: python
 
+    from __future__ import print_function
+    import os
     from proj.celery import celery
 
     @celery.task()
     def log_error(task_id):
         result = celery.AsyncResult(task_id)
         result.get(propagate=False)  # make sure result written.
-        with open('/var/errors/%s' % (task_id, )) as fh:
-            fh.write('--\n\n%s %s %s' % (
-                task_id, result.result, result.traceback))
+        with open(os.path.join('/var/errors', task_id), 'a') as fh:
+            print('--\n\n{0} {1} {2}'.format(
+                task_id, result.result, result.traceback), file=fh)
 
 To make it even easier to link tasks together there is
 a special subtask called :class:`~celery.chain` that lets

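The added `from __future__ import print_function` is what makes `print(..., file=fh)` legal under Python 2, replacing the old `print >> fh` statement. A minimal standalone sketch using a temporary file instead of /var/errors:

    from __future__ import print_function
    import tempfile

    with tempfile.NamedTemporaryFile(mode='w+') as fh:
        # file= sends the output to the open file handle
        print('--\n\n{0} {1} {2}'.format('task-id', None, None), file=fh)
        fh.seek(0)
        print(fh.read())
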
+ 4 - 4
docs/userguide/monitoring.rst

@@ -502,10 +502,10 @@ Here is an example camera, dumping the snapshot to screen:
             if not state.event_count:
                 # No new events since last snapshot.
                 return
-            print('Workers: %s' % (pformat(state.workers, indent=4), ))
-            print('Tasks: %s' % (pformat(state.tasks, indent=4), ))
-            print('Total: %s events, %s tasks' % (
-                state.event_count, state.task_count))
+            print('Workers: {0}'.format(pformat(state.workers, indent=4)))
+            print('Tasks: {0}'.format(pformat(state.tasks, indent=4)))
+            print('Total: {0.event_count} events, {0.task_count} tasks'.format(
+                state))
 
 See the API reference for :mod:`celery.events.state` to read more
 about state objects.

+ 3 - 3
docs/userguide/signals.rst

@@ -30,7 +30,7 @@ Example connecting to the :signal:`task_sent` signal:
     @task_sent.connect
     def task_sent_handler(sender=None, task_id=None, task=None, args=None,
                           kwargs=None, \*\*kwds):
-        print('Got signal task_sent for task id %s' % (task_id, ))
+        print('Got signal task_sent for task id {0}'.format(task_id))
 
 
 Some signals also have a sender which you can filter by. For example the
@@ -44,7 +44,7 @@ has been sent by providing the `sender` argument to
     @task_sent.connect(task_sent_handler, sender='tasks.add')
     def task_sent_handler(sender=None, task_id=None, task=None, args=None,
                           kwargs=None, \*\*kwds):
-        print('Got signal task_sent for task id %s' % (task_id, ))
+        print('Got signal task_sent for task id {0}'.format(task_id))
 
 .. _signal-ref:
 
@@ -225,7 +225,7 @@ used to route a task to any specific worker:
 
     @celeryd_after_setup.connect
     def setup_direct_queue(sender, instance, **kwargs):
-        queue_name = '%s.dq' % sender   # sender is the hostname of the worker
+        queue_name = '{0}.dq'.format(sender)  # sender is the hostname of the worker
         instance.app.queues.select_add(queue_name)
 
 Provides arguments:

+ 6 - 6
docs/userguide/tasks.rst

@@ -229,8 +229,8 @@ An example task accessing information in the context is:
 
     @celery.task()
     def dump_context(x, y):
-        print('Executing task id %r, args: %r kwargs: %r' % (
-            add.request.id, add.request.args, add.request.kwargs))
+        print('Executing task id {0.id}, args: {0.args!r} kwargs: {0.kwargs!r}'.format(
+                add.request))
 
 .. _task-logging:
 
@@ -255,7 +255,7 @@ for all of your tasks at the top of your module:
 
     @celery.task()
     def add(x, y):
-        logger.info('Adding %s + %s' % (x, y))
+        logger.info('Adding {0} + {1}'.format(x, y))
         return x + y
 
 Celery uses the standard Python logger library,
@@ -824,7 +824,7 @@ base class for new task types.
         abstract = True
 
         def after_return(self, *args, **kwargs):
-            print('Task returned: %r' % (self.request, ))
+            print('Task returned: {0!r}'.format(self.request))
 
 
     @celery.task(base=DebugTask)
@@ -1317,11 +1317,11 @@ blog/tasks.py
     @celery.task()
     def spam_filter(comment_id, remote_addr=None):
         logger = spam_filter.get_logger()
-        logger.info('Running spam filter for comment %s' % comment_id)
+        logger.info('Running spam filter for comment %s', comment_id)
 
         comment = Comment.objects.get(pk=comment_id)
         current_domain = Site.objects.get_current().domain
-        akismet = Akismet(settings.AKISMET_KEY, 'http://%s' % domain)
+        akismet = Akismet(settings.AKISMET_KEY, 'http://{0}'.format(current_domain))
         if not akismet.verify_key():
             raise ImproperlyConfigured('Invalid AKISMET_KEY')
 

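The context example above also shows that one argument can feed several fields and that the `!r` conversion applies repr() to the looked-up attribute. A small sketch with an invented request object:

    class Request(object):
        id = 'af3d'      # made-up values, illustration only
        args = (2, 2)
        kwargs = {}

    req = Request()
    print('Executing task id {0.id}, args: {0.args!r} kwargs: {0.kwargs!r}'
          .format(req))
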
+ 1 - 1
examples/celery_http_gateway/tasks.py

@@ -3,4 +3,4 @@ from celery import task
 
 @task()
 def hello_world(to='world'):
-    return 'Hello %s' % to
+    return 'Hello {0}'.format(to)

+ 2 - 2
examples/eventlet/tasks.py

@@ -4,9 +4,9 @@ from eventlet.green import urllib2
 
 @task()
 def urlopen(url):
-    print('Opening: %r' % (url, ))
+    print('Opening: {0}'.format(url))
     try:
         body = urllib2.urlopen(url).read()
     except Exception as exc:
-        print('URL %r gave error: %r' % (url, exc))
+        print('URL {0} gave error: {1!r}'.format(url, exc))
     return len(body)

+ 1 - 1
examples/eventlet/webcrawler.py

@@ -44,7 +44,7 @@ def domain(url):
 
 @task(ignore_result=True, serializer='pickle', compression='zlib')
 def crawl(url, seen=None):
-    print('crawling: %r' % (url, ))
+    print('crawling: {0}'.format(url))
     if not seen:
         seen = BloomFilter(capacity=50000, error_rate=0.0001)
 

+ 3 - 3
examples/gevent/tasks.py

@@ -5,11 +5,11 @@ from celery import task
 
 @task(ignore_result=True)
 def urlopen(url):
-    print('Opening: %r' % (url, ))
+    print('Opening: {0}'.format(url))
     try:
         body = urllib2.urlopen(url).read()
     except Exception as exc:
-        print('Exception for %r: %r' % (url, exc, ))
+        print('Exception for {0}: {1!r}'.format(url, exc))
         return url, 0
-    print('Done with: %r' % (url, ))
+    print('Done with: {0}'.format(url))
     return url, 1

+ 4 - 4
examples/resultgraph/tasks.py

@@ -30,19 +30,19 @@ def add(x, y):
 
 @task()
 def make_request(id, url):
-    print('GET %r' % (url, ))
+    print('GET {0!r}'.format(url))
     return url
 
 
 @task()
 def B_callback(urls, id):
-    print('batch %s done' % (id, ))
+    print('batch {0} done'.format(id))
     return urls
 
 
 @task()
 def B(id):
-    return chord(make_request.s(id, '%s %r' % (id, i, ))
+    return chord(make_request.s(id, '{0} {1!r}'.format(id, i))
                     for i in xrange(10))(B_callback.s(id))
 
 
@@ -84,7 +84,7 @@ def unlock_graph(result, callback, interval=1, propagate=False,
 
 @task()
 def A_callback(res):
-    print('Everything is done: %r' % (res, ))
+    print('Everything is done: {0!r}'.format(res))
     return res
 
 

+ 11 - 11
extra/release/bump_version.py

@@ -57,7 +57,7 @@ class TupleVersion(object):
 
         def quote(lit):
             if isinstance(lit, basestring):
-                return '"%s"' % (lit, )
+                return '"{0}"'.format(lit)
             return str(lit)
 
         if not v[-1]:
@@ -72,8 +72,8 @@ class VersionFile(object):
         self._kept = None
 
     def _as_orig(self, version):
-        return self.wb % {"version": self.type.encode(version),
-                          "kept": self._kept}
+        return self.wb.format(version=self.type.encode(version),
+                              kept=self._kept)
 
     def write(self, version):
         pattern = self.regex
@@ -101,19 +101,19 @@ class VersionFile(object):
 
 class PyVersion(VersionFile):
     regex = re.compile(r'^VERSION\s*=\s*\((.+?)\)')
-    wb = "VERSION = (%(version)s)\n"
+    wb = "VERSION = ({version})\n"
     type = TupleVersion()
 
 
 class SphinxVersion(VersionFile):
     regex = re.compile(r'^:[Vv]ersion:\s*(.+?)$')
-    wb = ':Version: %(version)s\n'
+    wb = ':Version: {version}\n'
     type = StringVersion()
 
 
 class CPPVersion(VersionFile):
     regex = re.compile(r'^\#\s*define\s*(?P<keep>\w*)VERSION\s+(.+)')
-    wb = '#define %(kept)sVERSION "%(version)s"\n'
+    wb = '#define {kept}VERSION "{version}"\n'
     type = StringVersion()
 
 
@@ -144,18 +144,18 @@ def bump(*files, **kwargs):
             raise Exception("Can't bump alpha releases")
         next = (major, minor, release + 1, text)
 
-    print("Bump version from %s -> %s" % (to_str(current), to_str(next)))
+    print("Bump version from {0} -> {1}".format(to_str(current), to_str(next)))
 
     for v in files:
-        print("  writing %r..." % (v.filename, ))
+        print("  writing {0.filename!r}...".format(v))
         v.write(next)
 
     if before_commit:
         cmd(*shlex.split(before_commit))
 
-    print(cmd("git", "commit", "-m", "Bumps version to %s" % (to_str(next), ),
-        *[f.filename for f in files]))
-    print(cmd("git", "tag", "v%s" % (to_str(next), )))
+    print(cmd("git", "commit", "-m", "Bumps version to {0}".format(
+        to_str(next)), *[f.filename for f in files]))
+    print(cmd("git", "tag", "v{0}".format(to_str(next))))
 
 
 def main(argv=sys.argv, version=None, before_commit=None):

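The write-back templates move from `%(version)s`-style mapping interpolation to named `{version}` fields filled with keyword arguments. A minimal sketch (the version tuple is invented):

    wb = 'VERSION = ({version})\n'
    # named fields are filled from keyword arguments; keywords the template
    # does not reference are ignored, while a missing one raises KeyError
    print(wb.format(version='3, 0, 0'))
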
+ 4 - 2
extra/release/verify_config_reference.py

@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 from fileinput import input
 from sys import exit, stderr
 
@@ -37,8 +39,8 @@ if __name__ == "__main__":
     sep = """\n  * """
     missing = find_undocumented_settings()
     if missing:
-        stderr.write("Error: found undocumented settings:%s%s\n" % (
-                        sep, sep.join(sorted(missing))))
+        print("Error: found undocumented settings:{0}{1}".format(
+                sep, sep.join(sorted(missing))), file=sys.stderr)
         exit(1)
     print("OK: Configuration reference complete :-)")
     exit(0)

+ 9 - 7
funtests/benchmarks/bench_worker.py

@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import os
 import sys
 import time
@@ -9,7 +11,7 @@ JSONIMP = os.environ.get('JSONIMP')
 if JSONIMP:
     anyjson.force_implementation(JSONIMP)
 
-print('anyjson implementation: %r' % (anyjson.implementation.name, ))
+print('anyjson implementation: {0!r}'.format(anyjson.implementation.name))
 
 from celery import Celery, group
 
@@ -51,14 +53,14 @@ def it(_, n):
     i = it.cur  # use internal counter, as ordering can be skewed
                 # by previous runs, or the broker.
     if i and not i % 5000:
-        print >> sys.stderr, '(%s so far: %ss)' % (i, tdiff(it.subt))
+        print('({0} so far: {1}s)'.format(i, tdiff(it.subt)), file=sys.stderr)
         it.subt = time.time()
     if not i:
         it.subt = it.time_start = time.time()
     elif i == n - 1:
         total = tdiff(it.time_start)
-        print >> sys.stderr, '(%s so far: %ss)' % (i, tdiff(it.subt))
-        print('-- process %s tasks: %ss total, %s tasks/s} ' % (
+        print('({0} so far: {1}s)'.format(i, tdiff(it.subt)), file=sys.stderr)
+        print('-- process {0} tasks: {1}s total, {2} tasks/s '.format(
                 n, total, n / (total + .0)))
         sys.exit()
     it.cur += 1
@@ -67,7 +69,7 @@ def it(_, n):
 def bench_apply(n=DEFAULT_ITS):
     time_start = time.time()
     group(it.s(i, n) for i in xrange(n))()
-    print('-- apply %s tasks: %ss' % (n, time.time() - time_start, ))
+    print('-- apply {0} tasks: {1}s'.format(n, time.time() - time_start))
 
 
 def bench_work(n=DEFAULT_ITS, loglevel='CRITICAL'):
@@ -93,8 +95,8 @@ def bench_both(n=DEFAULT_ITS):
 def main(argv=sys.argv):
     n = DEFAULT_ITS
     if len(argv) < 2:
-        print('Usage: %s [apply|work|both] [n=20k]' % (
-                os.path.basename(argv[0]), ))
+        print('Usage: {0} [apply|work|both] [n=20k]'.format(
+                os.path.basename(argv[0])))
         return sys.exit(1)
     try:
         try:

+ 9 - 6
funtests/suite/test_leak.py

@@ -1,3 +1,5 @@
+from __future__ import print_function
+
 import gc
 import os
 import sys
@@ -14,7 +16,7 @@ from celery.tests.utils import unittest
 
 import suite
 
-GET_RSIZE = '/bin/ps -p %(pid)s -o rss='
+GET_RSIZE = '/bin/ps -p {pid} -o rss='
 QUICKTEST = int(os.environ.get('QUICKTEST', 0))
 
 
@@ -37,10 +39,11 @@ class LeakFunCase(unittest.TestCase):
     def get_rsize(self, cmd=GET_RSIZE):
         try:
             return int(subprocess.Popen(
-                        shlex.split(cmd % {'pid': os.getpid()}),
+                        shlex.split(cmd.format(pid=os.getpid())),
                             stdout=subprocess.PIPE).communicate()[0].strip())
         except OSError as exc:
-            raise SkipTest('Can't execute command: %r: %r' % (cmd, exc))
+            raise SkipTest(
+                'Cannot execute command: {0!r}: {1!r}'.format(cmd, exc))
 
     def sample_allocated(self, fun, *args, **kwargs):
         before = self.get_rsize()
@@ -68,7 +71,7 @@ class LeakFunCase(unittest.TestCase):
                 if not first:
                     first = after
                 if self.debug:
-                    print('%r %s: before/after: %s/%s' % (
+                    print('{0!r} {1}: before/after: {2}/{3}'.format(
                             fun, i, before, after))
                 else:
                     sys.stderr.write('.')
@@ -78,8 +81,8 @@ class LeakFunCase(unittest.TestCase):
             try:
                 assert self.appx(first) >= self.appx(after)
             except AssertionError:
-                print('BASE: %r AVG: %r SIZES: %r' % (
-                    base, sizes.average(), sizes, ))
+                print('BASE: {0!r} AVG: {1!r} SIZES: {2!r}'.format(
+                    base, sizes.average(), sizes))
                 raise
         finally:
             self.app.control.purge()

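The GET_RSIZE template follows the same pattern with a named `{pid}` field; a quick standalone sketch of formatting and splitting such a command line (using the current process id):

    import os
    import shlex

    GET_RSIZE = '/bin/ps -p {pid} -o rss='
    cmd = GET_RSIZE.format(pid=os.getpid())
    # shlex.split turns the formatted string into an argument list
    print(shlex.split(cmd))
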
+ 14 - 14
pavement.py

@@ -33,7 +33,7 @@ def html(options):
 def qhtml(options):
     destdir = path('Documentation')
     builtdocs = sphinx_builddir(options)
-    sh('rsync -az %s/ %s' % (builtdocs, destdir))
+    sh('rsync -az {0}/ {1}'.format(builtdocs, destdir))
 
 
 @task
@@ -41,17 +41,17 @@ def qhtml(options):
 def ghdocs(options):
     builtdocs = sphinx_builddir(options)
     sh("git checkout gh-pages && \
-            cp -r %s/* .    && \
+            cp -r {0}/* .    && \
             git commit . -m 'Rendered documentation for Github Pages.' && \
             git push origin gh-pages && \
-            git checkout master" % builtdocs)
+            git checkout master".format(builtdocs))
 
 
 @task
 @needs('clean_docs', 'paver.doctools.html')
 def upload_pypi_docs(options):
     builtdocs = path('docs') / options.builddir / 'html'
-    sh("%s setup.py upload_sphinx --upload-dir='%s'" % (
+    sh("{0} setup.py upload_sphinx --upload-dir='{1}'".format(
         sys.executable, builtdocs))
 
 
@@ -73,8 +73,8 @@ def verifyindex(options):
 
 @task
 def verifyconfigref(options):
-    sh('PYTHONPATH=. %s extra/release/verify_config_reference.py \
-            docs/configuration.rst' % (sys.executable, ))
+    sh('PYTHONPATH=. {0} extra/release/verify_config_reference.py \
+            docs/configuration.rst'.format(sys.executable))
 
 
 @task
@@ -85,10 +85,10 @@ def flake8(options):
     noerror = getattr(options, 'noerror', False)
     complexity = getattr(options, 'complexity', 22)
     sh("""flake8 celery | perl -mstrict -mwarnings -nle'
-        my $ignore = m/too complex \((\d+)\)/ && $1 le %s;
-        if (! $ignore) { print STDERR; our $FOUND_FLAKE = 1 }
+        my $ignore = m/too complex \((\d+)\)/ && $1 le {0};
+        if (! $ignore) {{ print STDERR; our $FOUND_FLAKE = 1 }}
     }{exit $FOUND_FLAKE;
-        '""" % (complexity, ), ignore_error=noerror)
+        '""".format(complexity), ignore_error=noerror)
 
 
 @task
@@ -118,8 +118,8 @@ def clean_readme(options):
 @task
 @needs('clean_readme')
 def readme(options):
-    sh('%s extra/release/sphinx-to-rst.py docs/templates/readme.txt \
-            > README.rst' % (sys.executable, ))
+    sh('{0} extra/release/sphinx-to-rst.py docs/templates/readme.txt \
+            > README.rst'.format(sys.executable))
 
 
 @task
@@ -139,7 +139,7 @@ def test(options):
     if getattr(options, 'coverage', False):
         cmd += ' --with-coverage3'
     if getattr(options, 'quick', False):
-        cmd = 'QUICKTEST=1 SKIP_RLIMITS=1 %s' % cmd
+        cmd = 'QUICKTEST=1 SKIP_RLIMITS=1 {0}'.format(cmd)
     if getattr(options, 'verbose', False):
         cmd += ' --verbosity=2'
     sh(cmd)
@@ -157,8 +157,8 @@ def pep8(options):
 
 @task
 def removepyc(options):
-    sh('find . -type f -a \\( %s \\) | xargs rm' % (
-        ' -o '.join("-name '%s'" % (pat, ) for pat in PYCOMPILE_CACHES), ))
+    sh('find . -type f -a \\( {0} \\) | xargs rm'.format(
+        ' -o '.join("-name '{0}'".format(pat) for pat in PYCOMPILE_CACHES)))
 
 
 @task

+ 2 - 2
setup.py

@@ -30,11 +30,11 @@ try:
         _, task_path, _ = imp.find_module('task', celery.app.__path__)
         if task_path.endswith('/task'):
             print('- force upgrading previous installation')
-            print('  - removing %r package...' % task_path)
+            print('  - removing {0!r} package...'.format(task_path))
             try:
                 shutil.rmtree(os.path.abspath(task_path))
             except Exception:
-                sys.stderr.write('Could not remove %r: %r\n' % (
+                sys.stderr.write('Could not remove {0!r}: {1!r}\n'.format(
                     task_path, sys.exc_info()[1]))
     except ImportError:
         print('Upgrade: no old version found.')