
Removes the no_execv/force_execv options

Ask Solem committed 8 years ago · commit b5b4836be1
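
For anyone tracking the upgrade path: this commit drops the old-style CELERYD_FORCE_EXECV setting, its new-style name worker_force_execv, and the worker's --no-execv flag. A minimal sketch of configuration that becomes a no-op after this commit (the app name and broker URL below are placeholders, not part of the commit):

    # Sketch only: 'proj' and the broker URL are placeholders.
    from celery import Celery

    app = Celery('proj', broker='amqp://localhost')

    # Before this commit either of these enabled execv after fork:
    #     app.conf.worker_force_execv = True    # new-style name
    #     CELERYD_FORCE_EXECV = True            # old-style name
    # After this commit the setting no longer exists and the pool always
    # forks (forking_enable=True), so the lines above have no effect.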

celery/app/defaults.py  +0 -1

@@ -275,7 +275,6 @@ NAMESPACES = Namespace(
         enable_remote_control=Option(
             True, type='bool', old={'celery_enable_remote_control'},
         ),
-        force_execv=Option(False, type='bool'),
         hijack_root_logger=Option(True, type='bool'),
         log_color=Option(type='bool'),
         log_format=Option(DEFAULT_PROCESS_LOG_FMT),
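
For context, each entry in the NAMESPACES mapping declares one setting through Option, and the removed line was an ordinary boolean option. A rough, illustrative reconstruction (only the Option calls are taken from the hunk above; the standalone assignments are not how the real file is laid out):

    from celery.app.defaults import Option

    # Illustrative only: the declaration removed by this commit was a
    # boolean option defaulting to False.
    force_execv = Option(False, type='bool')

    # Neighbouring declarations from the same hunk, still present:
    hijack_root_logger = Option(True, type='bool')
    log_color = Option(type='bool')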

celery/bin/worker.py  +0 -4

@@ -119,10 +119,6 @@ The :program:`celery worker` command (previously known as ``celeryd``)
     completed and the child process will be replaced afterwards.
     Default: no limit.
 
-.. cmdoption:: --no-execv
-
-    Don't do execv after multiprocessing child fork.
-
 .. cmdoption:: --detach
 
     Start worker as a background process.

celery/worker/__init__.py  +5 -6

@@ -341,11 +341,11 @@ class WorkController(object):
                        send_events=None, pool_cls=None, consumer_cls=None,
                        timer_cls=None, timer_precision=None,
                        pool_putlocks=None, pool_restarts=None,
-                       force_execv=None, state_db=None,
-                       schedule_filename=None, scheduler_cls=None,
-                       task_time_limit=None, task_soft_time_limit=None,
-                       max_tasks_per_child=None, prefetch_multiplier=None,
-                       disable_rate_limits=None, worker_lost_wait=None,
+                       state_db=None, schedule_filename=None,
+                       scheduler_cls=None, task_time_limit=None,
+                       task_soft_time_limit=None, max_tasks_per_child=None,
+                       prefetch_multiplier=None, disable_rate_limits=None,
+                       worker_lost_wait=None,
                        max_memory_per_child=None, **_kw):
         either = self.app.either
         self.loglevel = loglevel
@@ -361,7 +361,6 @@ class WorkController(object):
         )
         self.pool_putlocks = either('worker_pool_putlocks', pool_putlocks)
         self.pool_restarts = either('worker_pool_restarts', pool_restarts)
-        self.force_execv = either('worker_force_execv', force_execv)
         self.state_db = either('worker_state_db', state_db)
         self.schedule_filename = either(
             'beat_schedule_filename', schedule_filename,
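
The either() helper visible in the surrounding context is what made force_execv configurable from two places at once: as used here it prefers the value passed to WorkController and otherwise falls back to the app setting. A hedged sketch of that pattern (the function below is illustrative, not Celery code):

    # Illustrative sketch of the either() fallback used in this hunk;
    # 'app' is a configured Celery application instance.
    def resolve_pool_restarts(app, pool_restarts=None):
        # app.either(key, *values) returns the first value that is not None,
        # otherwise the configured 'worker_pool_restarts' setting.  Dropping
        # force_execv simply removes one such lookup ('worker_force_execv').
        return app.either('worker_pool_restarts', pool_restarts)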

celery/worker/components.py  +2 -5

@@ -115,12 +115,10 @@ class Pool(bootsteps.StartStopStep):
     """
     requires = (Hub,)
 
-    def __init__(self, w,
-                 no_execv=False, optimization=None, **kwargs):
+    def __init__(self, w, optimization=None, **kwargs):
         w.pool = None
         w.max_concurrency = None
         w.min_concurrency = w.concurrency
-        w.no_execv = no_execv
         self.optimization = optimization
 
     def close(self, w):
@@ -137,7 +135,6 @@ class Pool(bootsteps.StartStopStep):
             warnings.warn(UserWarning(W_POOL_SETTING))
         threaded = not w.use_eventloop or IS_WINDOWS
         procs = w.min_concurrency
-        forking_enable = w.no_execv if w.force_execv else True
         w.process_task = w._process_task
         if not threaded:
             semaphore = w.semaphore = LaxBoundedSemaphore(procs)
@@ -159,7 +156,7 @@ class Pool(bootsteps.StartStopStep):
             threads=threaded,
             max_restarts=max_restarts,
             allow_restart=allow_restart,
-            forking_enable=forking_enable,
+            forking_enable=True,
             semaphore=semaphore,
             sched_strategy=self.optimization,
             app=w.app,
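
The behavioural heart of the commit is this hunk: the Pool bootstep previously derived forking_enable from the force_execv/no_execv pair and now passes True unconditionally. A side-by-side sketch (illustrative helper functions; w stands for the WorkController instance):

    # Before: the removed expression, taken verbatim from the hunk.
    def forking_enable_before(w):
        return w.no_execv if w.force_execv else True

    # After: forking is always enabled for the pool.
    def forking_enable_after(w):
        return True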

docs/conf.py  +0 -1

@@ -58,7 +58,6 @@ ignored_settings = {
     'redis_port',
     'redis_db',
     'redis_password',
-    'worker_force_execv',
 }
 
 

docs/configuration.rst  +0 -1

@@ -147,7 +147,6 @@ rush in moving to the new settings format.
 ``CELERY_WORKER_DIRECT``               :setting:`worker_direct`
 ``CELERY_DISABLE_RATE_LIMITS``         :setting:`worker_disable_rate_limits`
 ``CELERY_ENABLE_REMOTE_CONTROL``       :setting:`worker_enable_remote_control`
-``CELERYD_FORCE_EXECV``                :setting:`worker_force_execv`
 ``CELERYD_HIJACK_ROOT_LOGGER``         :setting:`worker_hijack_root_logger`
 ``CELERYD_LOG_COLOR``                  :setting:`worker_log_color`
 ``CELERYD_LOG_FORMAT``                 :setting:`worker_log_format`

docs/history/changelog-3.0.rst  +1 -1

@@ -557,7 +557,7 @@ If you're looking for versions prior to 3.0.x you should go to :ref:`history`.
 - ``execv`` is now disabled by default.
 
     It was causing too many problems for users, you can still enable
-    it using the :setting:`CELERYD_FORCE_EXECV` setting.
+    it using the `CELERYD_FORCE_EXECV` setting.
 
     execv was only enabled when transports other than AMQP/Redis was used,
     and it's there to prevent deadlocks caused by mutexes not being released

docs/history/changelog-3.1.rst  +1 -1

@@ -329,7 +329,7 @@ new in Celery 3.1.
 :release-date: 2014-11-19 03:30 P.M UTC
 :release-by: Ask Solem
 
-.. admonition:: Do not enable the :setting:`CELERYD_FORCE_EXECV` setting!
+.. admonition:: Do not enable the `CELERYD_FORCE_EXECV` setting!
 
     Please review your configuration and disable this option if you're using the
     RabbitMQ or Redis transport.

docs/history/whatsnew-2.5.rst  +1 -1

@@ -80,7 +80,7 @@ race condition leading to an annoying warning.
 Solution for hanging workers (but must be manually enabled)
 -----------------------------------------------------------
 
-The :setting:`CELERYD_FORCE_EXECV` setting has been added to solve
+The `CELERYD_FORCE_EXECV` setting has been added to solve
 a problem with deadlocks that originate when threads and fork is mixed
 together:
 

extra/bash-completion/celery.bash  +1 -1

@@ -75,7 +75,7 @@ _celery()
         COMPREPLY=( $(compgen -W '--concurrency= --pool= --purge --logfile=
         --loglevel= --hostname= --beat --schedule= --scheduler= --statedb= --events
         --time-limit= --soft-time-limit= --maxtasksperchild= --queues=
-        --include= --pidfile= --no-execv $fargs' -- ${cur} ) )
+        --include= --pidfile= $fargs' -- ${cur} ) )
         return 0
         ;;
     inspect)

extra/zsh-completion/celery.zsh  +0 -1

@@ -55,7 +55,6 @@ case "$words[1]" in
     '(-Q --queues=)'{-Q,--queues=}'[List of queues to enable for this worker, separated by comma. By default all configured queues are enabled.]' \
     '(-I --include=)'{-I,--include=}'[Comma separated list of additional modules to import.]' \
     '(--pidfile=)--pidfile=[Optional file used to store the process pid.]' \
-    '(--no-execv)--no-execv[Don"t do execv after multiprocessing child fork.]'
     compadd -a ifargs
     ;;
     inspect)