
Merge branch '3.0'

Conflicts:
	Changelog
	README.rst
	celery/__init__.py
	celery/platforms.py
	celery/utils/mail.py
	docs/includes/introduction.txt
	requirements/default.txt
	setup.cfg
Ask Solem 12 years ago
parent
commit
0cbdb7dcf9

+ 53 - 1
Changelog

@@ -20,11 +20,63 @@ If you're looking for versions prior to 3.x you should see :ref:`history`.
 - `Task.apply_async` now supports timeout and soft_timeout arguments (Issue #802)
 - `App.control.Inspect.conf` can be used for inspecting worker configuration
 
+.. _version-3.0.7:
+
+3.0.7
+=====
+:release-date: 2012-08-24 05:00 P.M BST
+
+- Fixes several problems with periodic tasks and timezones (Issue #937).
+
+- Now depends on kombu 2.4.2
+
+    - Redis: Fixes a race condition crash
+
+    - Fixes an infinite loop that could happen when retrying establishing
+      the broker connection.
+
+- Daemons now redirect standard file descriptors to /dev/null
+
+    By default the standard outs are still redirected
+    to the logger instead; you can disable this by changing
+    the :setting:`CELERY_REDIRECT_STDOUTS` setting.
+
+- Fixes possible problems when eventlet/gevent is patched too late.
+
+- ``LoggingProxy`` no longer defines ``fileno()`` (Issue #928).
+
+- Results are now ignored for the chord unlock task.
+
+    Fix contributed by Steeve Morin.
+
+- Cassandra backend now works if result expiry is disabled.
+
+    Fix contributed by Steeve Morin.
+
+- The traceback object is now passed to signal handlers instead
+  of the string representation.
+
+    Fix contributed by Adam DePue.
+
+- Celery command: Extensions are now sorted by name.
+
+- A regression caused the ``task-failed`` event to be sent
+  with the exception object instead of its string representation.
+
+- The worker daemon would try to create the pid file before daemonizing
+  to catch errors, but this file was not immediately released (Issue #923).
+
+- Fixes Jython compatibility.
+
+- ``billiard.forking_enable`` was called by all pools not just the
+  processes pool, which would result in a useless warning if the billiard
+  C extensions were not installed.
+
 .. _version-3.0.6:
 
 3.0.6
 =====
-:release-date: 2012-09-17 11:00 P.M BST
+:release-date: 2012-08-17 11:00 P.M BST
 
 - Now depends on kombu 2.4.0
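The 3.0.7 notes above mention that daemon stdout/stderr still go through the logger by default and that :setting:`CELERY_REDIRECT_STDOUTS` controls this. A minimal configuration sketch (the setting name comes from the changelog entry, the value is only illustrative)::

    # celeryconfig.py
    # Stop redirecting stdout/stderr to the logger in daemonized workers.
    CELERY_REDIRECT_STDOUTS = False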
 

+ 20 - 7
celery/platforms.py

@@ -40,8 +40,6 @@ IS_WINDOWS = SYSTEM == 'Windows'
 
 DAEMON_UMASK = 0
 DAEMON_WORKDIR = '/'
-DAEMON_REDIRECT_TO = getattr(os, 'devnull', '/dev/null')
-
 
 PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY
 PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK))
@@ -253,6 +251,13 @@ def _create_pidlock(pidfile):
     return pidlock
 
 
+def fileno(f):
+    try:
+        return f.fileno()
+    except AttributeError:
+        pass
+
+
 class DaemonContext(object):
     _is_open = False
     workdir = DAEMON_WORKDIR
@@ -263,6 +268,12 @@ class DaemonContext(object):
         self.workdir = workdir or self.workdir
         self.umask = self.umask if umask is None else umask
         self.fake = fake
+        self.stdfds = (sys.stdin, sys.stdout, sys.stderr)
+
+    def redirect_to_null(self, fd):
+        if fd:
+            dest = os.open(os.devnull, os.O_RDWR)
+            os.dup2(dest, fd)
 
     def open(self):
         if not self._is_open:
@@ -272,12 +283,14 @@ class DaemonContext(object):
             os.chdir(self.workdir)
             os.umask(self.umask)
 
+            preserve = [fileno(f) for f in self.stdfds if fileno(f)]
             for fd in reversed(range(get_fdmax(default=2048))):
-                with ignore_EBADF():
-                    os.close(fd)
-            os.open(DAEMON_REDIRECT_TO, os.O_RDWR)
-            os.dup2(0, 1)
-            os.dup2(0, 2)
+                if fd not in preserve:
+                    with ignore_EBADF():
+                        os.close(fd)
+
+            for fd in self.stdfds:
+                self.redirect_to_null(fileno(fd))
 
             self._is_open = True
     __enter__ = open
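The hunk above replaces the old blanket ``os.open``/``os.dup2`` calls: every inherited descriptor except the standard ones is closed, and each standard stream is then pointed at ``os.devnull``. A simplified, standalone sketch of that technique (POSIX only, not the exact Celery code)::

    import os
    import sys

    def redirect_std_to_devnull():
        # Point stdin/stdout/stderr at /dev/null so a detached daemon
        # never writes to the terminal it inherited.
        for stream in (sys.stdin, sys.stdout, sys.stderr):
            try:
                fd = stream.fileno()
            except (AttributeError, ValueError):
                continue  # stream has been replaced by a file-like object
            dest = os.open(os.devnull, os.O_RDWR)
            os.dup2(dest, fd)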

+ 0 - 1
celery/tests/app/test_log.py

@@ -218,7 +218,6 @@ class test_default_logger(AppCase):
             p.flush()
             p.close()
             self.assertFalse(p.isatty())
-            self.assertIsNone(p.fileno())
 
     def test_logging_proxy_recurse_protection(self):
         logger = self.setup_logger(loglevel=logging.ERROR, logfile=None,

+ 12 - 6
celery/tests/tasks/test_tasks.py

@@ -14,6 +14,7 @@ from celery.task import (
     periodic_task,
     PeriodicTask
 )
+from celery import current_app
 from celery.app import app_or_default
 from celery.exceptions import RetryTaskError
 from celery.execute import send_task
@@ -25,6 +26,10 @@ from celery.utils.timeutils import parse_iso8601, timedelta_seconds
 from celery.tests.utils import Case, with_eager_tasks, WhateverIO
 
 
+def now():
+    return current_app.now()
+
+
 def return_True(*args, **kwargs):
     # Task run functions can't be closures/lambdas, as they're pickled.
     return True
@@ -292,8 +297,8 @@ class test_tasks(Case):
 
         # With eta.
         presult2 = T1.apply_async(kwargs=dict(name='George Costanza'),
-                            eta=datetime.utcnow() + timedelta(days=1),
-                            expires=datetime.utcnow() + timedelta(days=2))
+                            eta=now() + timedelta(days=1),
+                            expires=now() + timedelta(days=2))
         self.assertNextTaskDataEqual(consumer, presult2, T1.name,
                 name='George Costanza', test_eta=True, test_expires=True)
 
@@ -530,12 +535,13 @@ class test_periodic_tasks(Case):
             type('Foo', (PeriodicTask, ), {'__module__': __name__})
 
     def test_remaining_estimate(self):
+        s = my_periodic.run_every
         self.assertIsInstance(
-            my_periodic.run_every.remaining_estimate(datetime.utcnow()),
+            s.remaining_estimate(s.maybe_make_aware(now())),
             timedelta)
 
     def test_is_due_not_due(self):
-        due, remaining = my_periodic.run_every.is_due(datetime.utcnow())
+        due, remaining = my_periodic.run_every.is_due(now())
         self.assertFalse(due)
         # This assertion may fail if executed in the
         # first minute of an hour, thus 59 instead of 60
@@ -544,7 +550,7 @@ class test_periodic_tasks(Case):
     def test_is_due(self):
         p = my_periodic
         due, remaining = p.run_every.is_due(
-                datetime.utcnow() - p.run_every.run_every)
+                now() - p.run_every.run_every)
         self.assertTrue(due)
         self.assertEqual(remaining,
                          timedelta_seconds(p.run_every.run_every))
@@ -904,7 +910,7 @@ class test_crontab_remaining_estimate(Case):
 class test_crontab_is_due(Case):
 
     def setUp(self):
-        self.now = datetime.utcnow()
+        self.now = now()
         self.next_minute = 60 - self.now.second - 1e-6 * self.now.microsecond
 
     def test_default_crontab_spec(self):
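These tests now build ETA/expiry values from ``current_app.now()`` instead of the naive ``datetime.utcnow()``, which keeps them in line with the timezone fixes noted in the 3.0.7 changelog. A small usage sketch (the ``add`` task is hypothetical)::

    from datetime import timedelta

    from celery import current_app

    # current_app.now() honours CELERY_ENABLE_UTC / CELERY_TIMEZONE,
    # unlike the naive datetime.utcnow().
    eta = current_app.now() + timedelta(minutes=10)
    add.apply_async(args=(2, 2), eta=eta)  # `add` is a task defined elsewhere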

+ 4 - 5
celery/tests/utilities/test_platforms.py

@@ -296,12 +296,13 @@ if not current_app.IS_WINDOWS:
             finally:
                 platforms.resource = prev
 
-        @patch('celery.platforms.create_pidlock')
+        @patch('celery.platforms._create_pidlock')
         @patch('celery.platforms.signals')
         @patch('celery.platforms.maybe_drop_privileges')
         @patch('os.geteuid')
         @patch('__builtin__.open')
-        def test_default(self, open, geteuid, maybe_drop, signals, pidlock):
+        def test_default(self, open, geteuid, maybe_drop,
+                signals, pidlock):
             geteuid.return_value = 0
             context = detached(uid='user', gid='group')
             self.assertIsInstance(context, DaemonContext)
@@ -345,9 +346,7 @@ if not current_app.IS_WINDOWS:
 
             chdir.assert_called_with(x.workdir)
             umask.assert_called_with(x.umask)
-            open.assert_called_with(platforms.DAEMON_REDIRECT_TO, os.O_RDWR)
-            self.assertEqual(dup2.call_args_list[0], [(0, 1), {}])
-            self.assertEqual(dup2.call_args_list[1], [(0, 2), {}])
+            self.assertTrue(dup2.called)
 
             fork.reset_mock()
             fork.return_value = 1

+ 16 - 2
celery/utils/mail.py

@@ -9,6 +9,7 @@
 from __future__ import absolute_import
 
 import smtplib
+import socket
 import traceback
 import warnings
 
@@ -16,6 +17,15 @@ from email.mime.text import MIMEText
 
 from .functional import maybe_list
 
+_local_hostname = None
+
+
+def get_local_hostname():
+    global _local_hostname
+    if _local_hostname is None:
+        _local_hostname = socket.getfqdn()
+    return _local_hostname
+
 
 class SendmailWarning(UserWarning):
     """Problem happened while sending the email message."""
@@ -68,7 +78,8 @@ class Mailer(object):
 
     def _send(self, message, **kwargs):
         Client = smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
-        client = Client(self.host, self.port, timeout=self.timeout, **kwargs)
+        client = Client(self.host, self.port, timeout=self.timeout,
+                        local_hostname=get_local_hostname(), **kwargs)
 
         if self.use_tls:
             client.ehlo()
@@ -79,7 +90,10 @@ class Mailer(object):
             client.login(self.user, self.password)
 
         client.sendmail(message.sender, message.to, str(message))
-        client.quit()
+        try:
+            client.quit()
+        except socket.sslerror:
+            client.close()
 
 
 class ErrorMail(object):
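The change above resolves the local FQDN once and passes it to ``smtplib``, presumably so each connection avoids repeating the ``socket.getfqdn()`` lookup, and it falls back to closing the connection when ``quit()`` fails over SSL. A reduced sketch of the hostname-caching part (host and port values are illustrative)::

    import smtplib
    import socket

    _local_hostname = None

    def get_local_hostname():
        # Resolve the FQDN once and reuse it; otherwise smtplib performs
        # the lookup itself for every new connection.
        global _local_hostname
        if _local_hostname is None:
            _local_hostname = socket.getfqdn()
        return _local_hostname

    client = smtplib.SMTP('localhost', 25, timeout=2,
                          local_hostname=get_local_hostname())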

+ 1 - 1
docs/history/changelog-2.0.rst

@@ -763,7 +763,7 @@ News
         Hard time limit. The worker processing the task will be killed and
         replaced with a new one when this is exceeded.
 
-    * :setting:`CELERYD_SOFT_TASK_TIME_LIMIT`
+    * :setting:`CELERYD_TASK_SOFT_TIME_LIMIT`
 
         Soft time limit. The :exc:`~@SoftTimeLimitExceeded`
         exception will be raised when this is exceeded.  The task can catch

+ 1 - 1
docs/history/changelog-2.1.rst

@@ -137,7 +137,7 @@ Fixes
 
     This has been fixed, and it is now released only once per task.
 
-* docs/configuration: Fixed typo `CELERYD_SOFT_TASK_TIME_LIMIT` ->
+* docs/configuration: Fixed typo `CELERYD_TASK_SOFT_TIME_LIMIT` ->
   :setting:`CELERYD_TASK_SOFT_TIME_LIMIT`.
 
     See issue #214

+ 1 - 1
docs/userguide/workers.rst

@@ -314,7 +314,7 @@ time limit kills it:
             clean_up_in_a_hurry()
 
 Time limits can also be set using the :setting:`CELERYD_TASK_TIME_LIMIT` /
-:setting:`CELERYD_SOFT_TASK_TIME_LIMIT` settings.
+:setting:`CELERYD_TASK_SOFT_TIME_LIMIT` settings.
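For reference, the two settings named here could be configured like this (the setting names come from the docs text being corrected; the numeric values are only illustrative)::

    # celeryconfig.py
    CELERYD_TASK_TIME_LIMIT = 300        # hard limit: the worker process is killed
    CELERYD_TASK_SOFT_TIME_LIMIT = 240   # soft limit: SoftTimeLimitExceeded is raised first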
 
 .. note::
 

+ 1 - 1
requirements/default-py3k.txt

@@ -1,4 +1,4 @@
 billiard>=2.7.3.12
 python-dateutil>=2.1
 pytz
-kombu>=2.4.0,<3.0
+kombu>=2.4.2,<3.0

+ 1 - 1
requirements/default.txt

@@ -1,3 +1,3 @@
 billiard>=2.7.3.12
 python-dateutil>=2.1
-kombu>=2.4.0,<3.0
+kombu>=2.4.2,<3.0

+ 1 - 1
setup.cfg

@@ -18,5 +18,5 @@ requires = uuid
            importlib
            billiard >= 2.7.3.12
            python-dateutil >= 2.1
-           kombu >= 2.4.0
+           kombu >= 2.4.2
            ordereddict