
Doc improvements

Ask Solem 11 years ago
commit bfce82e25d

+ 3 - 3
celery/bin/celery.py

@@ -341,8 +341,8 @@ class inspect(_RemoteControl):
     Examples::

         celery inspect active --timeout=5
-        celery inspect scheduled -d worker1.example.com
-        celery inspect revoked -d w1.e.com,w2.e.com
+        celery inspect scheduled -d worker1@example.com
+        celery inspect revoked -d w1@e.com,w2@e.com

     """
     name = 'inspect'
@@ -379,7 +379,7 @@ class control(_RemoteControl):
     Examples::

         celery control enable_events --timeout=5
-        celery control -d worker1.example.com enable_events
+        celery control -d worker1@example.com enable_events
         celery control -d w1.e.com,w2.e.com enable_events

         celery control -d w1.e.com add_consumer queue_name

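For reference, the CLI examples above have direct equivalents in the programmatic control API; a minimal sketch, assuming an application instance named ``app`` and reusing the placeholder node names from the docstrings:

    >>> # Ask only the named node for its scheduled (eta) tasks,
    >>> # waiting up to five seconds for replies.
    >>> app.control.inspect(
    ...     destination=['worker1@example.com'], timeout=5).scheduled()

    >>> # Broadcast a control command to two specific nodes only.
    >>> app.control.enable_events(destination=['w1@e.com', 'w2@e.com'])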
+ 2 - 2
celery/tests/bin/test_celeryevdump.py

@@ -24,12 +24,12 @@ class test_Dumper(AppCase):

     def test_format_task_event(self):
         self.dumper.format_task_event(
-            'worker.example.com', time(), 'task-started', 'tasks.add', {})
+            'worker@example.com', time(), 'task-started', 'tasks.add', {})
         self.assertTrue(self.out.getvalue())

     def test_on_event(self):
         event = {
-            'hostname': 'worker.example.com',
+            'hostname': 'worker@example.com',
             'timestamp': time(),
             'uuid': '1ef',
             'name': 'tasks.add',

+ 3 - 3
docs/configuration.rst

@@ -768,16 +768,16 @@ so that tasks can be routed to specific workers.
 The queue name for each worker is automatically generated based on
 the worker hostname and a ``.dq`` suffix, using the ``C.dq`` exchange.

-For example the queue name for the worker with hostname ``w1.example.com``
+For example the queue name for the worker with node name ``w1@example.com``
 becomes::

-    w1.example.com.dq
+    w1@example.com.dq

 Then you can route the task to the worker by specifying the hostname
 as the routing key and the ``C.dq`` exchange::

     CELERY_ROUTES = {
-        'tasks.add': {'exchange': 'C.dq', 'routing_key': 'w1.example.com'}
+        'tasks.add': {'exchange': 'C.dq', 'routing_key': 'w1@example.com'}
     }

 .. setting:: CELERY_CREATE_MISSING_QUEUES

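The route can also be supplied per call instead of through ``CELERY_ROUTES``; a hedged sketch, assuming the worker-direct queues described above are enabled and a ``tasks.add`` task is importable:

    from tasks import add

    # Publish to the C.dq exchange with the node name as the routing
    # key, so only w1@example.com's dedicated queue receives this call.
    add.apply_async(args=(2, 2), exchange='C.dq',
                    routing_key='w1@example.com')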
+ 1 - 1
docs/getting-started/first-steps-with-celery.rst

@@ -380,7 +380,7 @@ for the task at runtime:
 .. code-block:: bash

     $ celery control rate_limit tasks.add 10/m
-    worker.example.com: OK
+    worker@example.com: OK
         new rate limit set successfully

 See :ref:`guide-routing` to read more about task routing,

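The same rate-limit change can be made from Python through the control API; a small sketch, assuming an application instance named ``app``:

    >>> # Equivalent of `celery control rate_limit tasks.add 10/m`;
    >>> # reply=True waits for each worker's confirmation.
    >>> app.control.rate_limit('tasks.add', '10/m', reply=True)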
+ 1 - 1
docs/getting-started/next-steps.rst

@@ -628,7 +628,7 @@ list of worker host names:

 .. code-block:: bash

-    $ celery -A proj inspect active --destination=worker1.example.com
+    $ celery -A proj inspect active --destination=celery@example.com

 If a destination is not provided then every worker will act and reply
 to the request.

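Driven from Python, the same inspection returns a mapping keyed by node name, so passing ``destination`` simply restricts which nodes appear in the result; a minimal sketch, assuming an application instance named ``app``:

    >>> i = app.control.inspect(destination=['celery@example.com'])
    >>> # active() maps each replying node name to its running tasks,
    >>> # or returns None if no worker replied in time.
    >>> for node, tasks in (i.active() or {}).items():
    ...     print(node, len(tasks))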
+ 4 - 4
docs/userguide/routing.rst

@@ -147,21 +147,21 @@ start it with the ``-Q`` option:

 .. code-block:: bash

-    user@z:/$ celery worker -Q feed_tasks --hostname=z.example.com
+    user@z:/$ celery worker -Q feed_tasks --hostname=z@%h

 Servers `x` and `y` must be configured to consume from the default queue:

 .. code-block:: bash

-    user@x:/$ celery worker -Q default --hostname=x.example.com
-    user@y:/$ celery worker -Q default --hostname=y.example.com
+    user@x:/$ celery worker -Q default --hostname=x@%h
+    user@y:/$ celery worker -Q default --hostname=y@%h

 If you want, you can even have your feed processing worker handle regular
 tasks as well, maybe in times when there's a lot of work to do:

 .. code-block:: bash

-    user@z:/$ celery worker -Q feed_tasks,default --hostname=z.example.com
+    user@z:/$ celery worker -Q feed_tasks,default --hostname=z@%h

 If you want to add another queue on a different exchange,
 just specify a custom exchange and exchange type:

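The workers above only consume the queues; tasks still need to be routed to them. A minimal sketch of a matching route, assuming a task name such as ``feed.tasks.import_feed`` (illustrative only):

    CELERY_ROUTES = {
        # Feed imports go to the dedicated queue consumed by worker z;
        # everything else falls back to the default queue on x and y.
        'feed.tasks.import_feed': {'queue': 'feed_tasks'},
    }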
+ 3 - 3
docs/userguide/signals.rst

@@ -314,7 +314,7 @@ to setup worker specific configuration:

     from celery.signals import celeryd_init

-    @celeryd_init.connect(sender='worker12.example.com')
+    @celeryd_init.connect(sender='worker12@example.com')
     def configure_worker12(conf=None, **kwargs):
         conf.CELERY_DEFAULT_RATE_LIMIT = '10/m'

@@ -327,9 +327,9 @@ sender when you connect:

     @celeryd_init.connect
     def configure_workers(sender=None, conf=None, **kwargs):
-        if sender in ('worker1.example.com', 'worker2.example.com'):
+        if sender in ('worker1@example.com', 'worker2@example.com'):
             conf.CELERY_DEFAULT_RATE_LIMIT = '10/m'
-        if sender == 'worker3.example.com':
+        if sender == 'worker3@example.com':
             conf.CELERYD_PREFETCH_MULTIPLIER = 0

 Provides arguments:

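Because the sender is now a full node name, a handler can also key off just the host part; a hedged sketch, where the domain check is purely illustrative:

    from celery.signals import celeryd_init

    @celeryd_init.connect
    def configure_by_host(sender=None, conf=None, **kwargs):
        # sender is a node name such as 'worker1@example.com'.
        name, _, host = (sender or '').partition('@')
        if host == 'example.com':
            conf.CELERY_DEFAULT_RATE_LIMIT = '10/m'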
+ 20 - 11
docs/userguide/workers.rst

@@ -215,7 +215,7 @@ to receive the command::
     >>> app.control.broadcast('rate_limit', {
     ...     'task_name': 'myapp.mytask',
     ...     'rate_limit': '200/m'}, reply=True,
-    ...                             destination=['worker1.example.com'])
+    ...                             destination=['worker1@example.com'])
     [{'worker1.example.com': 'New rate limit set successfully'}]

@@ -404,16 +404,21 @@ Rate Limits
 Changing rate-limits at runtime
 -------------------------------

-Example changing the rate limit for the `myapp.mytask` task to accept
-200 tasks a minute on all servers::
+Example changing the rate limit for the `myapp.mytask` task to execute
+at most 200 tasks of that type every minute:
+
+.. code-block:: python

     >>> app.control.rate_limit('myapp.mytask', '200/m')

-Example changing the rate limit on a single host by specifying the
-destination host name::
+The above does not specify a destination, so the change request will affect
+all worker instances in the cluster.  If you only want to affect a specific
+list of workers you can include the ``destination`` argument:
+
+.. code-block:: python

     >>> app.control.rate_limit('myapp.mytask', '200/m',
-    ...            destination=['worker1.example.com'])
+    ...            destination=['celery@worker1.example.com'])

 .. warning::

@@ -523,7 +528,7 @@ The same can be accomplished dynamically using the :meth:`@control.add_consumer`
     [{u'worker1.local': {u'ok': u"already consuming from u'foo'"}}]

     >>> myapp.control.add_consumer('foo', reply=True,
-    ...                            destination=['worker1.local'])
+    ...                            destination=['worker1@example.com'])
     [{u'worker1.local': {u'ok': u"already consuming from u'foo'"}}]

@@ -541,7 +546,7 @@ even other options::
     ...         'exchange_durable': False,
     ...     },
     ...     reply=True,
-    ...     destination=['worker1.local', 'worker2.local'])
+    ...     destination=['w1@example.com', 'w2@example.com'])

 .. control:: cancel_consumer
@@ -1024,10 +1029,12 @@ Additional Commands
 Remote shutdown
 ---------------

-This command will gracefully shut down the worker remotely::
+This command will gracefully shut down the worker remotely:
+
+.. code-block:: python

     >>> app.control.broadcast('shutdown') # shutdown all workers
-    >>> app.control.broadcast('shutdown, destination='worker1.example.com')
+    >>> app.control.broadcast('shutdown', destination=['worker1@example.com'])

 .. control:: ping

@@ -1037,7 +1044,9 @@ Ping
 This command requests a ping from alive workers.
 The workers reply with the string 'pong', and that's just about it.
 It will use the default one second timeout for replies unless you specify
-a custom timeout::
+a custom timeout:
+
+.. code-block:: python

     >>> app.control.ping(timeout=0.5)
     [{'worker1.example.com': 'pong'},

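Each element of the ping reply is keyed by the responding node name, so collecting which nodes answered is straightforward; a small sketch, assuming an application instance named ``app``:

    >>> replies = app.control.ping(timeout=0.5)
    >>> # Every reply maps one node name to its answer.
    >>> responding = [node for reply in replies for node in reply]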
+ 1 - 1
extra/zsh-completion/celery.zsh

@@ -43,7 +43,7 @@ case "$words[1]" in
     '(--purge --discard)'{--discard,--purge}'[Purges all waiting tasks before the daemon is started.]' \
     '(-f --logfile=)'{-f,--logfile=}'[Path to log file. If no logfile is specified, stderr is used.]' \
     '(--loglevel=)--loglevel=:::(critical error warning info debug)' \
-    '(-N --hostname=)'{-N,--hostname=}'[Set custom hostname, e.g. "foo.example.com".]' \
+    '(-N --hostname=)'{-N,--hostname=}'[Set custom hostname, e.g. "foo@example.com".]' \
     '(-B --beat)'{-B,--beat}'[Also run the celerybeat periodic task scheduler.]' \
     '(-s --schedule=)'{-s,--schedule=}'[Path to the schedule database if running with the -B option. Defaults to celerybeat-schedule.]' \
     '(-S --statedb=)'{-S,--statedb=}'[Path to the state database. Default: None]' \