From 7892a5a8dd20566d8246681d10b81c56e3db0695 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 3 Sep 2020 16:50:32 +0300
Subject: [PATCH 001/157] Remove defaults for unsupported Python runtimes.
---
celery/app/defaults.py | 10 ----------
1 file changed, 10 deletions(-)
diff --git a/celery/app/defaults.py b/celery/app/defaults.py
index 2c0bc30f4ec..9d1b140ea2a 100644
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -1,5 +1,4 @@
"""Configuration introspection and defaults."""
-import sys
from collections import deque, namedtuple
from datetime import timedelta
@@ -8,17 +7,8 @@
__all__ = ('Option', 'NAMESPACES', 'flatten', 'find')
-is_jython = sys.platform.startswith('java')
-is_pypy = hasattr(sys, 'pypy_version_info')
DEFAULT_POOL = 'prefork'
-if is_jython:
- DEFAULT_POOL = 'solo'
-elif is_pypy:
- if sys.pypy_version_info[0:3] < (1, 5, 0):
- DEFAULT_POOL = 'solo'
- else:
- DEFAULT_POOL = 'prefork'
DEFAULT_ACCEPT_CONTENT = ['json']
DEFAULT_PROCESS_LOG_FMT = """
From c76293a5bd59f979e477951ad5acc1037cb7753d Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 3 Sep 2020 17:40:19 +0300
Subject: [PATCH 002/157] Remove obsolete test.
---
t/unit/app/test_defaults.py | 10 ----------
1 file changed, 10 deletions(-)
diff --git a/t/unit/app/test_defaults.py b/t/unit/app/test_defaults.py
index 3990737c864..9f8b520a16a 100644
--- a/t/unit/app/test_defaults.py
+++ b/t/unit/app/test_defaults.py
@@ -24,16 +24,6 @@ def test_any(self):
val = object()
assert self.defaults.Option.typemap['any'](val) is val
- @mock.sys_platform('darwin')
- @mock.pypy_version((1, 4, 0))
- def test_default_pool_pypy_14(self):
- assert self.defaults.DEFAULT_POOL == 'solo'
-
- @mock.sys_platform('darwin')
- @mock.pypy_version((1, 5, 0))
- def test_default_pool_pypy_15(self):
- assert self.defaults.DEFAULT_POOL == 'prefork'
-
def test_compat_indices(self):
assert not any(key.isupper() for key in DEFAULTS)
assert not any(key.islower() for key in _OLD_DEFAULTS)
From 4296611f6e556321330130443ce8c90e9796f179 Mon Sep 17 00:00:00 2001
From: Thomas Grainger
Date: Sat, 5 Sep 2020 02:08:24 +0100
Subject: [PATCH 003/157] Doc pytest plugin (#6289)
* update to new pytest name
* doc pytest plugin
* trim heading to the length of the new pytest name
---
CONTRIBUTING.rst | 20 ++++++++++----------
Makefile | 2 +-
celery/contrib/pytest.py | 2 +-
docker/Dockerfile | 2 +-
docs/userguide/testing.rst | 15 ++++++++++++---
setup.py | 2 +-
t/unit/app/test_schedules.py | 2 +-
7 files changed, 27 insertions(+), 18 deletions(-)
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 7564d55933e..a941d2348a9 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -592,14 +592,14 @@ To run the Celery test suite you need to install
$ pip install -U -r requirements/default.txt
After installing the dependencies required, you can now execute
-the test suite by calling :pypi:`py.test <pytest>`:
+the test suite by calling :pypi:`pytest <pytest>`:
.. code-block:: console
- $ py.test t/unit
- $ py.test t/integration
+ $ pytest t/unit
+ $ pytest t/integration
-Some useful options to :command:`py.test` are:
+Some useful options to :command:`pytest` are:
* ``-x``
@@ -618,7 +618,7 @@ you can do so like this:
.. code-block:: console
- $ py.test t/unit/worker/test_worker.py
+ $ pytest t/unit/worker/test_worker.py
.. _contributing-coverage:
@@ -636,11 +636,11 @@ Installing the :pypi:`pytest-cov` module:
Code coverage in HTML format
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-#. Run :command:`py.test` with the ``--cov-report=html`` argument enabled:
+#. Run :command:`pytest` with the ``--cov-report=html`` argument enabled:
.. code-block:: console
- $ py.test --cov=celery --cov-report=html
+ $ pytest --cov=celery --cov-report=html
#. The coverage output will then be located in the :file:`htmlcov/` directory:
@@ -651,11 +651,11 @@ Code coverage in HTML format
Code coverage in XML (Cobertura-style)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-#. Run :command:`py.test` with the ``--cov-report=xml`` argument enabled:
+#. Run :command:`pytest` with the ``--cov-report=xml`` argument enabled:
.. code-block:: console
- $ py.test --cov=celery --cov-report=xml
+ $ pytest --cov=celery --cov-report=xml
#. The coverage XML output will then be located in the :file:`coverage.xml` file.
@@ -828,7 +828,7 @@ make it easier for the maintainers to accept your proposed changes:
``Needs Test Coverage``.
- [ ] Make sure unit test coverage does not decrease.
- ``py.test -xv --cov=celery --cov-report=xml --cov-report term``.
+ ``pytest -xv --cov=celery --cov-report=xml --cov-report term``.
You can check the current test coverage here: https://codecov.io/gh/celery/celery
- [ ] Run ``flake8`` against the code. The following commands are valid
diff --git a/Makefile b/Makefile
index d07972a0146..2ffdc12a340 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
PROJ=celery
PGPIDENT="Celery Security Team"
PYTHON=python
-PYTEST=py.test
+PYTEST=pytest
GIT=git
TOX=tox
ICONV=iconv
diff --git a/celery/contrib/pytest.py b/celery/contrib/pytest.py
index cd5ad0e7316..c54ea5cb0fa 100644
--- a/celery/contrib/pytest.py
+++ b/celery/contrib/pytest.py
@@ -1,4 +1,4 @@
-"""Fixtures and testing utilities for :pypi:`py.test `."""
+"""Fixtures and testing utilities for :pypi:`pytest `."""
import os
from contextlib import contextmanager
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 3a9f70c16db..403052787f8 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -114,5 +114,5 @@ WORKDIR $HOME/celery
# Setup the entrypoint, this ensures pyenv is initialized when a container is started
# and that any compiled files from earlier steps or from mounts are removed to avoid
-# py.test failing with an ImportMismatchError
+# pytest failing with an ImportMismatchError
ENTRYPOINT ["/entrypoint"]
diff --git a/docs/userguide/testing.rst b/docs/userguide/testing.rst
index 8167dbf6e24..cc92ae53fb3 100644
--- a/docs/userguide/testing.rst
+++ b/docs/userguide/testing.rst
@@ -88,14 +88,23 @@ in this example:
with raises(Retry):
send_order(product.pk, 3, Decimal(30.6))
-Py.test
-=======
+pytest
+======
.. versionadded:: 4.0
-Celery is also a :pypi:`pytest` plugin that adds fixtures that you can
+Celery also makes a :pypi:`pytest` plugin available that adds fixtures that you can
use in your integration (or unit) test suites.
+Enabling
+--------
+
+Celery initially ships the plugin in a disabled state. To enable it, you can either:
+
+ * `pip install celery[pytest]`
+ * `pip install pytest-celery`
+ * or add `pytest_plugins = 'celery.contrib.pytest'` to your root conftest.py
+
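Once enabled, the bundled fixtures can be used directly. A minimal sketch,
assuming the plugin is enabled via one of the options above (the ``mul`` task
is illustrative):

.. code-block:: python

    def test_mul(celery_app, celery_worker):
        @celery_app.task
        def mul(x, y):
            return x * y

        # The embedded worker started before mul() was registered,
        # so reload it to pick the new task up.
        celery_worker.reload()
        assert mul.delay(4, 4).get(timeout=10) == 16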
Marks
-----
diff --git a/setup.py b/setup.py
index 258b152900a..c5843c28321 100644
--- a/setup.py
+++ b/setup.py
@@ -136,7 +136,7 @@ def long_description():
class pytest(setuptools.command.test.test):
- user_options = [('pytest-args=', 'a', 'Arguments to pass to py.test')]
+ user_options = [('pytest-args=', 'a', 'Arguments to pass to pytest')]
def initialize_options(self):
setuptools.command.test.test.initialize_options(self)
diff --git a/t/unit/app/test_schedules.py b/t/unit/app/test_schedules.py
index 669d189e216..881791a10ed 100644
--- a/t/unit/app/test_schedules.py
+++ b/t/unit/app/test_schedules.py
@@ -599,7 +599,7 @@ def seconds_almost_equal(self, a, b, precision):
try:
assertions.assertAlmostEqual(a, b + skew, precision)
except Exception as exc:
- # AssertionError != builtins.AssertionError in py.test
+ # AssertionError != builtins.AssertionError in pytest
if 'AssertionError' in str(exc):
if index + 1 >= 3:
raise
From f0dca0db35ba2d4a6d022698a8c17985fbf0351d Mon Sep 17 00:00:00 2001
From: Martin Paulus
Date: Fri, 4 Sep 2020 00:22:39 +0800
Subject: [PATCH 004/157] add warning against use of sort key on dynamodb
table, closes #6332
---
docs/userguide/configuration.rst | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index 6dedd402e55..cfb413eb156 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -1675,6 +1675,13 @@ AWS DynamoDB backend settings
See :ref:`bundles` for information on combining multiple extension
requirements.
+.. warning::
+
+ The DynamoDB backend is not compatible with tables that have a sort key defined.
+
+ If you want to query the results table based on something other than the partition key,
+ please define a global secondary index (GSI) instead.
+
This backend requires the :setting:`result_backend`
setting to be set to a DynamoDB URL::
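For reference, the documented URL format is shown below; the second line is a
hypothetical example (region and table name are illustrative, and per the
warning above the table must use a plain partition key without a sort key):

.. code-block:: python

    result_backend = 'dynamodb://aws_access_key_id:aws_secret_access_key@region:port/table?read=n&write=m'

    # e.g., taking credentials from the environment or an instance role:
    result_backend = 'dynamodb://@us-east-1/celery_results'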
From 8d6f7a8a787c0ffe2f79ef877e52bb81da335890 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Mon, 7 Sep 2020 22:43:38 +1000
Subject: [PATCH 005/157] Remove celery.five and bump vine dep (#6338)
* improv: Replace `five.values` with `dict.values`
* improv: Use `time.monotonic()` in kombu tests
Also in the docs where it is used to demonstrate `memcache` timeouts.
* rm: Delete `celery.five`
`vine.five` is no longer present in `vine >= 5`.
* triv: Remove refs to `celery.five` in docs, &c
* build: Bump `vine` dependency to 5.0+
---
.coveragerc | 1 -
CONTRIBUTING.rst | 1 -
celery/five.py | 7 -------
celery/utils/log.py | 4 +---
docs/conf.py | 1 -
docs/tutorials/task-cookbook.rst | 6 +++---
requirements/default.txt | 2 +-
t/benchmarks/bench_worker.py | 12 +++++-------
8 files changed, 10 insertions(+), 24 deletions(-)
delete mode 100644 celery/five.py
diff --git a/.coveragerc b/.coveragerc
index 12323f0b012..4d3146384b7 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -12,7 +12,6 @@ omit =
*/celery/bin/graph.py
*celery/bin/logtool.py
*celery/task/base.py
- *celery/five.py
*celery/contrib/sphinx.py
*celery/concurrency/asynpool.py
*celery/utils/debug.py
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index a941d2348a9..9814b9c7ee4 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -1067,7 +1067,6 @@ is following the conventions.
from Queue import Queue, Empty
from .platforms import Pidfile
- from .five import zip_longest, items, range
from .utils.time import maybe_timedelta
* Wild-card imports must not be used (`from xxx import *`).
diff --git a/celery/five.py b/celery/five.py
deleted file mode 100644
index f89738aa14b..00000000000
--- a/celery/five.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""Python 2/3 compatibility utilities."""
-
-import sys
-
-import vine.five
-
-sys.modules[__name__] = vine.five
diff --git a/celery/utils/log.py b/celery/utils/log.py
index 95941284043..6acff167fcf 100644
--- a/celery/utils/log.py
+++ b/celery/utils/log.py
@@ -11,8 +11,6 @@
from kombu.log import get_logger as _get_logger
from kombu.utils.encoding import safe_str
-from celery.five import values
-
from .term import colored
__all__ = (
@@ -45,7 +43,7 @@ def set_in_sighandler(value):
def iter_open_logger_fds():
seen = set()
- loggers = (list(values(logging.Logger.manager.loggerDict)) +
+ loggers = (list(logging.Logger.manager.loggerDict.values()) +
[logging.getLogger(None)])
for l in loggers:
try:
diff --git a/docs/conf.py b/docs/conf.py
index 4b6750ae83a..6c7dbc6aaad 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -25,7 +25,6 @@
'cyanide': ('https://cyanide.readthedocs.io/en/latest', None),
},
apicheck_ignore_modules=[
- 'celery.five',
'celery.__main__',
'celery.task',
'celery.contrib.testing',
diff --git a/docs/tutorials/task-cookbook.rst b/docs/tutorials/task-cookbook.rst
index 4ed3c267b36..41e2db734bb 100644
--- a/docs/tutorials/task-cookbook.rst
+++ b/docs/tutorials/task-cookbook.rst
@@ -37,8 +37,8 @@ For this reason your tasks run-time shouldn't exceed the timeout.
.. code-block:: python
+ import time
from celery import task
- from celery.five import monotonic
from celery.utils.log import get_task_logger
from contextlib import contextmanager
from django.core.cache import cache
@@ -51,7 +51,7 @@ For this reason your tasks run-time shouldn't exceed the timeout.
@contextmanager
def memcache_lock(lock_id, oid):
- timeout_at = monotonic() + LOCK_EXPIRE - 3
+ timeout_at = time.monotonic() + LOCK_EXPIRE - 3
# cache.add fails if the key already exists
status = cache.add(lock_id, oid, LOCK_EXPIRE)
try:
@@ -59,7 +59,7 @@ For this reason your tasks run-time shouldn't exceed the timeout.
finally:
# memcache delete is very slow, but we have to use it to take
# advantage of using add() for atomic locking
- if monotonic() < timeout_at and status:
+ if time.monotonic() < timeout_at and status:
# don't release the lock if we exceeded the timeout
# to lessen the chance of releasing an expired lock
# owned by someone else
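For context, the cookbook recipe acquires this lock around the task body,
roughly as follows (a condensed sketch; the task and lock-id scheme are
illustrative):

.. code-block:: python

    @task(bind=True)
    def import_feed(self, feed_url):
        # One lock per feed URL; self.app.oid identifies this worker instance.
        lock_id = '{0}-lock-{1}'.format(self.name, feed_url)
        with memcache_lock(lock_id, self.app.oid) as acquired:
            if acquired:
                ...  # do the actual import while holding the lock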
diff --git a/requirements/default.txt b/requirements/default.txt
index de7bc9c14b0..124c56679da 100644
--- a/requirements/default.txt
+++ b/requirements/default.txt
@@ -1,7 +1,7 @@
pytz>dev
billiard>=3.6.3.0,<4.0
kombu>=5.0.0,<6.0
-vine==1.3.0
+vine>=5.0.0,<6.0
click>=7.0
click-didyoumean>=0.0.3
click-repl>=0.1.6
diff --git a/t/benchmarks/bench_worker.py b/t/benchmarks/bench_worker.py
index c538e4e3286..716094a5ed8 100644
--- a/t/benchmarks/bench_worker.py
+++ b/t/benchmarks/bench_worker.py
@@ -1,8 +1,6 @@
import os
import sys
-from kombu.five import monotonic # noqa
-
from celery import Celery # noqa
os.environ.update(
@@ -41,7 +39,7 @@
def tdiff(then):
- return monotonic() - then
+ return time.monotonic() - then
@app.task(cur=0, time_start=None, queue='bench.worker', bare=True)
@@ -51,9 +49,9 @@ def it(_, n):
i = it.cur
if i and not i % 5000:
print('({} so far: {}s)'.format(i, tdiff(it.subt)), file=sys.stderr)
- it.subt = monotonic()
+ it.subt = time.monotonic()
if not i:
- it.subt = it.time_start = monotonic()
+ it.subt = it.time_start = time.monotonic()
elif i > n - 2:
total = tdiff(it.time_start)
print('({} so far: {}s)'.format(i, tdiff(it.subt)), file=sys.stderr)
@@ -66,11 +64,11 @@ def it(_, n):
def bench_apply(n=DEFAULT_ITS):
- time_start = monotonic()
+ time_start = time.monotonic()
task = it._get_current_object()
with app.producer_or_acquire() as producer:
[task.apply_async((i, n), producer=producer) for i in range(n)]
- print('-- apply {} tasks: {}s'.format(n, monotonic() - time_start))
+ print('-- apply {} tasks: {}s'.format(n, time.monotonic() - time_start))
def bench_work(n=DEFAULT_ITS, loglevel='CRITICAL'):
From 79208bdacb7016d614d9ed562870c3d2fd8c164f Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Sun, 6 Sep 2020 18:51:04 +0300
Subject: [PATCH 006/157] Wheels are no longer universal.
---
setup.cfg | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/setup.cfg b/setup.cfg
index 460aa2b4262..fc8847c6200 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -34,7 +34,7 @@ requires = pytz >= 2016.7
kombu >= 4.6.8,<5.0.0
[bdist_wheel]
-universal = 1
+universal = 0
[metadata]
license_file = LICENSE
From 47acd55c89bae08a3493f3aff43ce0569494c9ad Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 7 Sep 2020 15:46:43 +0300
Subject: [PATCH 007/157] Remove failing before_install step.
---
.travis.yml | 1 -
1 file changed, 1 deletion(-)
diff --git a/.travis.yml b/.travis.yml
index 1b6044bfc6b..fe00bfb458f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -56,7 +56,6 @@ matrix:
stage: lint
- python: pypy3.6-7.3.1
env: TOXENV=pypy3
- before_install: sudo apt-get update && sudo apt-get install libgnutls-dev
stage: test
before_install:
From c7608422bb3569dd180178767ae8b06df2e3c67c Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 7 Sep 2020 16:01:17 +0300
Subject: [PATCH 008/157] Update changelog.
---
Changelog.rst | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/Changelog.rst b/Changelog.rst
index 1667a7f46f2..2ff15232008 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -14,6 +14,14 @@ an overview of what's new in Celery 5.0.
:release-date: N/A
:release-by: Omer Katz
+
+5.0.0rc3
+========
+:release-date: 2020-09-01 6.30 P.M UTC+3:00
+:release-by: Omer Katz
+
+- More cleanups of leftover Python 2 support. (#6338)
+
5.0.0rc2
========
:release-date: 2020-09-01 6.30 P.M UTC+3:00
From a192be01f5f5133bee6b8a2ad85e46fc0773e7c2 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 7 Sep 2020 16:01:25 +0300
Subject: [PATCH 009/157] =?UTF-8?q?Bump=20version:=205.0.0rc2=20=E2=86=92?=
=?UTF-8?q?=205.0.0rc3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.bumpversion.cfg | 2 +-
README.rst | 6 +++---
celery/__init__.py | 2 +-
docs/includes/introduction.txt | 2 +-
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 699988ca6a2..62e8b476da7 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 5.0.0rc2
+current_version = 5.0.0rc3
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?P<releaselevel>[a-z\d]+)?
diff --git a/README.rst b/README.rst
index 1dc01b950c8..2193389d89c 100644
--- a/README.rst
+++ b/README.rst
@@ -2,7 +2,7 @@
|build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge|
-:Version: 5.0.0rc2 (cliffs)
+:Version: 5.0.0rc3 (cliffs)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
@@ -57,7 +57,7 @@ in such a way that the client enqueues an URL to be requested by a worker.
What do I need?
===============
-Celery version 5.0.0rc2 runs on,
+Celery version 5.0.0rc3 runs on,
- Python (3.6, 3.7, 3.8)
- PyPy3.6 (7.6)
@@ -89,7 +89,7 @@ Get Started
===========
If this is the first time you're trying to use Celery, or you're
-new to Celery 5.0.0rc2 coming from previous versions then you should read our
+new to Celery 5.0.0rc3 coming from previous versions then you should read our
getting started tutorials:
- `First steps with Celery`_
diff --git a/celery/__init__.py b/celery/__init__.py
index eadf89fff5d..1413244a3a3 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -17,7 +17,7 @@
SERIES = 'singularity'
-__version__ = '5.0.0rc2'
+__version__ = '5.0.0rc3'
__author__ = 'Ask Solem'
__contact__ = 'auvipy@gmail.com'
__homepage__ = 'http://celeryproject.org'
diff --git a/docs/includes/introduction.txt b/docs/includes/introduction.txt
index 6ecdd5beae2..ec4ccbbaf45 100644
--- a/docs/includes/introduction.txt
+++ b/docs/includes/introduction.txt
@@ -1,4 +1,4 @@
-:Version: 5.0.0rc2 (cliffs)
+:Version: 5.0.0rc3 (cliffs)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
From 08678ef5b2faeccccdff705b1d40d7d0352b4488 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 7 Sep 2020 16:02:06 +0300
Subject: [PATCH 010/157] Fix release date.
---
Changelog.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Changelog.rst b/Changelog.rst
index 2ff15232008..c5126ef3bd6 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -17,7 +17,7 @@ an overview of what's new in Celery 5.0.
5.0.0rc3
========
-:release-date: 2020-09-01 6.30 P.M UTC+3:00
+:release-date: 2020-09-07 4.00 P.M UTC+3:00
:release-by: Omer Katz
- More cleanups of leftover Python 2 support. (#6338)
From a8e88bc4c8e06754dec7d3a1711a5c85853ad07c Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 7 Sep 2020 17:14:45 +0300
Subject: [PATCH 011/157] Remove unused import.
---
t/unit/app/test_defaults.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/t/unit/app/test_defaults.py b/t/unit/app/test_defaults.py
index 9f8b520a16a..e105f2b49d2 100644
--- a/t/unit/app/test_defaults.py
+++ b/t/unit/app/test_defaults.py
@@ -1,8 +1,6 @@
import sys
from importlib import import_module
-from case import mock
-
from celery.app.defaults import (_OLD_DEFAULTS, _OLD_SETTING_KEYS,
_TO_NEW_KEY, _TO_OLD_KEY, DEFAULTS,
NAMESPACES, SETTING_KEYS)
From 465d26766d6d959e9f871bf3663d3491e4b82883 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 7 Sep 2020 17:44:35 +0300
Subject: [PATCH 012/157] Correctly skip these tests when the relevant
dependency is missing.
---
t/unit/backends/test_azureblockblob.py | 2 +-
t/unit/backends/test_mongodb.py | 60 +++++++++++++-------------
2 files changed, 32 insertions(+), 30 deletions(-)
diff --git a/t/unit/backends/test_azureblockblob.py b/t/unit/backends/test_azureblockblob.py
index 07f1c6daeb3..969993290d4 100644
--- a/t/unit/backends/test_azureblockblob.py
+++ b/t/unit/backends/test_azureblockblob.py
@@ -9,7 +9,7 @@
MODULE_TO_MOCK = "celery.backends.azureblockblob"
-pytest.importorskip('azure')
+pytest.importorskip('azure.storage.blob')
class test_AzureBlockBlobBackend:
diff --git a/t/unit/backends/test_mongodb.py b/t/unit/backends/test_mongodb.py
index 867754f3894..a67411f6121 100644
--- a/t/unit/backends/test_mongodb.py
+++ b/t/unit/backends/test_mongodb.py
@@ -31,7 +31,6 @@
class test_MongoBackend:
-
default_url = 'mongodb://uuuu:pwpw@hostname.dom/database'
replica_set_url = (
'mongodb://uuuu:pwpw@hostname.dom,'
@@ -128,11 +127,10 @@ def test_init_with_settings(self):
mb = MongoBackend(app=self.app, url='mongodb://')
- @patch('dns.resolver.query')
- def test_init_mongodb_dns_seedlist(self, dns_resolver_query):
- from dns.name import Name
- from dns.rdtypes.ANY.TXT import TXT
- from dns.rdtypes.IN.SRV import SRV
+ def test_init_mongodb_dns_seedlist(self):
+ Name = pytest.importorskip('dns.name').Name
+ TXT = pytest.importorskip('dns.rdtypes.ANY.TXT').TXT
+ SRV = pytest.importorskip('dns.rdtypes.IN.SRV').SRV
self.app.conf.mongodb_backend_settings = None
@@ -150,8 +148,6 @@ def mock_resolver(_, rdtype, rdclass=None, lifetime=None, **kwargs):
elif rdtype == 'TXT':
return [TXT(0, 0, [b'replicaSet=rs0'])]
- dns_resolver_query.side_effect = mock_resolver
-
# uri with user, password, database name, replica set,
# DNS seedlist format
uri = ('srv://'
@@ -159,20 +155,21 @@ def mock_resolver(_, rdtype, rdclass=None, lifetime=None, **kwargs):
'dns-seedlist-host.example.com/'
'celerydatabase')
- mb = MongoBackend(app=self.app, url=uri)
- assert mb.mongo_host == [
- 'mongo1.example.com:27017',
- 'mongo2.example.com:27017',
- 'mongo3.example.com:27017',
- ]
- assert mb.options == dict(
- mb._prepare_client_options(),
- replicaset='rs0',
- ssl=True
- )
- assert mb.user == 'celeryuser'
- assert mb.password == 'celerypassword'
- assert mb.database_name == 'celerydatabase'
+ with patch('dns.resolver.query', side_effect=mock_resolver):
+ mb = MongoBackend(app=self.app, url=uri)
+ assert mb.mongo_host == [
+ 'mongo1.example.com:27017',
+ 'mongo2.example.com:27017',
+ 'mongo3.example.com:27017',
+ ]
+ assert mb.options == dict(
+ mb._prepare_client_options(),
+ replicaset='rs0',
+ ssl=True
+ )
+ assert mb.user == 'celeryuser'
+ assert mb.password == 'celerypassword'
+ assert mb.database_name == 'celerydatabase'
def test_ensure_mongodb_uri_compliance(self):
mb = MongoBackend(app=self.app, url=None)
@@ -181,7 +178,7 @@ def test_ensure_mongodb_uri_compliance(self):
assert compliant_uri('mongodb://') == 'mongodb://localhost'
assert compliant_uri('mongodb+something://host') == \
- 'mongodb+something://host'
+ 'mongodb+something://host'
assert compliant_uri('something://host') == 'mongodb+something://host'
@@ -564,7 +561,8 @@ def test_encode(self, mongo_backend_factory, serializer, encoded_into):
backend = mongo_backend_factory(serializer=serializer)
assert isinstance(backend.encode(10), encoded_into)
- def test_encode_decode(self, mongo_backend_factory, serializer, encoded_into):
+ def test_encode_decode(self, mongo_backend_factory, serializer,
+ encoded_into):
backend = mongo_backend_factory(serializer=serializer)
decoded = backend.decode(backend.encode(12))
assert decoded == 12
@@ -647,9 +645,11 @@ def find_one(self, task_id):
monkeypatch.setattr(MongoBackend, "collection", FakeMongoCollection())
@pytest.mark.parametrize("serializer,result_type,result", [
- (s, type(i['result']), i['result']) for i in SUCCESS_RESULT_TEST_DATA for s in i['serializers']]
- )
- def test_encode_success_results(self, mongo_backend_factory, serializer, result_type, result):
+ (s, type(i['result']), i['result']) for i in SUCCESS_RESULT_TEST_DATA
+ for s in i['serializers']]
+ )
+ def test_encode_success_results(self, mongo_backend_factory, serializer,
+ result_type, result):
backend = mongo_backend_factory(serializer=serializer)
backend.store_result(TASK_ID, result, 'SUCCESS')
recovered = backend.get_result(TASK_ID)
@@ -658,8 +658,10 @@ def test_encode_success_results(self, mongo_backend_factory, serializer, result_
assert type(recovered) == result_type
assert recovered == result
- @pytest.mark.parametrize("serializer", ["bson", "pickle", "yaml", "json", "msgpack"])
- def test_encode_exception_error_results(self, mongo_backend_factory, serializer):
+ @pytest.mark.parametrize("serializer",
+ ["bson", "pickle", "yaml", "json", "msgpack"])
+ def test_encode_exception_error_results(self, mongo_backend_factory,
+ serializer):
backend = mongo_backend_factory(serializer=serializer)
exception = Exception("Basic Exception")
backend.store_result(TASK_ID, exception, 'FAILURE')
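``pytest.importorskip()`` imports and returns the named module, skipping the
test (or, at module level, the whole file) when the import fails. The dotted
path matters here: ``azure`` alone may be importable as a bare namespace
package even when ``azure.storage.blob`` is missing. A minimal sketch with
illustrative dependencies:

.. code-block:: python

    import pytest

    # Module level: every test in this file is skipped without pymongo.
    pymongo = pytest.importorskip('pymongo')

    def test_dns_seedlist():
        # Test level: only this test is skipped without dnspython.
        Name = pytest.importorskip('dns.name').Name
        assert Name is not None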
From b08efaae648003c2fcb81ef732cf6b45de4534d8 Mon Sep 17 00:00:00 2001
From: Ash Berlin-Taylor
Date: Wed, 2 Sep 2020 17:26:16 +0100
Subject: [PATCH 013/157] Expose retry_policy for Redis result backend
Rather than adding a new top-level config option, I have used a new key
in the already existing setting `result_backend_transport_options`.
Closes #6166
---
celery/backends/redis.py | 30 +++++++++++++++-----------
docs/getting-started/brokers/redis.rst | 15 +++++++++++++
t/unit/backends/test_redis.py | 23 ++++++++++++++++++++
3 files changed, 55 insertions(+), 13 deletions(-)
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
index 660af701ac8..1b9db7433fe 100644
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -328,6 +328,15 @@ def _params_from_url(self, url, defaults):
connparams.update(query)
return connparams
+ @cached_property
+ def retry_policy(self):
+ retry_policy = super().retry_policy
+ if "retry_policy" in self._transport_options:
+ retry_policy = retry_policy.copy()
+ retry_policy.update(self._transport_options['retry_policy'])
+
+ return retry_policy
+
def on_task_call(self, producer, task_id):
if not task_join_will_block():
self.result_consumer.consume_from(task_id)
@@ -401,10 +410,11 @@ def apply_chord(self, header_result, body, **kwargs):
@cached_property
def _chord_zset(self):
- transport_options = self.app.conf.get(
- 'result_backend_transport_options', {}
- )
- return transport_options.get('result_chord_ordered', True)
+ return self._transport_options.get('result_chord_ordered', True)
+
+ @cached_property
+ def _transport_options(self):
+ return self.app.conf.get('result_backend_transport_options', {})
def on_chord_part_return(self, request, state, result,
propagate=None, **kwargs):
@@ -530,12 +540,8 @@ def _get_sentinel_instance(self, **params):
connparams = params.copy()
hosts = connparams.pop("hosts")
- result_backend_transport_opts = self.app.conf.get(
- "result_backend_transport_options", {})
- min_other_sentinels = result_backend_transport_opts.get(
- "min_other_sentinels", 0)
- sentinel_kwargs = result_backend_transport_opts.get(
- "sentinel_kwargs", {})
+ min_other_sentinels = self._transport_options.get("min_other_sentinels", 0)
+ sentinel_kwargs = self._transport_options.get("sentinel_kwargs", {})
sentinel_instance = self.sentinel.Sentinel(
[(cp['host'], cp['port']) for cp in hosts],
@@ -548,9 +554,7 @@ def _get_sentinel_instance(self, **params):
def _get_pool(self, **params):
sentinel_instance = self._get_sentinel_instance(**params)
- result_backend_transport_opts = self.app.conf.get(
- "result_backend_transport_options", {})
- master_name = result_backend_transport_opts.get("master_name", None)
+ master_name = self._transport_options.get("master_name", None)
return sentinel_instance.master_for(
service_name=master_name,
diff --git a/docs/getting-started/brokers/redis.rst b/docs/getting-started/brokers/redis.rst
index 9dde8c90868..52a9b6944b3 100644
--- a/docs/getting-started/brokers/redis.rst
+++ b/docs/getting-started/brokers/redis.rst
@@ -94,6 +94,21 @@ If you are using Sentinel, you should specify the master_name using the :setting
app.conf.result_backend_transport_options = {'master_name': "mymaster"}
+Connection timeouts
+^^^^^^^^^^^^^^^^^^^
+
+To configure the connection timeouts for the Redis result backend, use the ``retry_policy`` key under :setting:`result_backend_transport_options`:
+
+
+.. code-block:: python
+
+ app.conf.result_backend_transport_options = {
+ 'retry_policy': {
+ 'timeout': 5.0
+ }
+ }
+
+See :func:`~kombu.utils.functional.retry_over_time` for the possible retry policy options.
.. _redis-caveats:
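For reference, ``retry_over_time()`` accepts more knobs than ``timeout``
alone; a fuller, hypothetical policy might look like this:

.. code-block:: python

    app.conf.result_backend_transport_options = {
        'retry_policy': {
            'max_retries': 3,      # give up after this many attempts
            'interval_start': 0,   # retry immediately the first time
            'interval_step': 0.2,  # add 0.2s of delay per attempt
            'interval_max': 1.0,   # cap the delay between attempts
        },
    }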
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
index 1415978cbfc..7c3e3e7d908 100644
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -523,6 +523,29 @@ def test_on_connection_error(self, logger):
assert self.b.on_connection_error(10, exc, intervals, 3) == 30
logger.error.assert_called_with(self.E_LOST, 3, 10, 'in 30.00 seconds')
+ @patch('celery.backends.redis.retry_over_time')
+ def test_retry_policy_conf(self, retry_over_time):
+ self.app.conf.result_backend_transport_options = dict(
+ retry_policy=dict(
+ max_retries=2,
+ interval_start=0,
+ interval_step=0.01,
+ ),
+ )
+ b = self.Backend(app=self.app)
+
+ def fn():
+ return 1
+
+ # We don't want to re-test retry_over_time, just check we called it
+ # with the expected args
+ b.ensure(fn, (),)
+
+ retry_over_time.assert_called_with(
+ fn, b.connection_errors, (), {}, ANY,
+ max_retries=2, interval_start=0, interval_step=0.01, interval_max=1
+ )
+
def test_incr(self):
self.b.client = Mock(name='client')
self.b.incr('foo')
From 84329f12c77dc6aadbea35fa13101bf3ea59c5fd Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 10 Sep 2020 13:02:47 +0300
Subject: [PATCH 014/157] Update changelog for 4.3.1.
---
docs/history/changelog-4.3.rst | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/docs/history/changelog-4.3.rst b/docs/history/changelog-4.3.rst
index 7a73cd13104..0502c1de09e 100644
--- a/docs/history/changelog-4.3.rst
+++ b/docs/history/changelog-4.3.rst
@@ -8,6 +8,16 @@ This document contains change notes for bugfix releases in
the 4.3.x series, please see :ref:`whatsnew-4.3` for
an overview of what's new in Celery 4.3.
+4.3.1
+=====
+
+:release-date: 2020-09-10 1:00 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Limit vine version to be below 5.0.0.
+
+ Contributed by **Omer Katz**
+
4.3.0
=====
:release-date: 2019-03-31 7:00 P.M UTC+3:00
From acaf3976f3df13c2671b8c78f31a9f507488292c Mon Sep 17 00:00:00 2001
From: qiaocc
Date: Thu, 10 Sep 2020 18:15:09 +0800
Subject: [PATCH 015/157] fix typo (#6346)
---
Changelog.rst | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/Changelog.rst b/Changelog.rst
index c5126ef3bd6..0a37098c9f0 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -37,17 +37,17 @@ an overview of what's new in Celery 5.0.
:release-by: Omer Katz
- Allow to opt out of ordered group results when using the Redis result backend (#6290).
-- **Breaking Chnage** Remove the deprecated celery.utils.encoding module.
+- **Breaking Change** Remove the deprecated celery.utils.encoding module.
5.0.0b1
=======
:release-date: 2020-08-19 8.30 P.M UTC+3:00
:release-by: Omer Katz
-- **Breaking Chnage** Drop support for the Riak result backend (#5686).
-- **Breaking Chnage** pytest plugin is no longer enabled by default. (#6288)
+- **Breaking Change** Drop support for the Riak result backend (#5686).
+- **Breaking Change** pytest plugin is no longer enabled by default. (#6288)
Install pytest-celery to enable it.
-- **Breaking Chnage** Brand new CLI based on Click (#5718).
+- **Breaking Change** Brand new CLI based on Click (#5718).
5.0.0a2
=======
From 2e5ad55577251dc2aee565d541cd6332fc172ca0 Mon Sep 17 00:00:00 2001
From: Christian Clauss
Date: Thu, 10 Sep 2020 17:22:47 +0200
Subject: [PATCH 016/157] Travis CI: Test Python 3.9 release candidate 1
(#6328)
* Travis CI: Test Python 3.9 release candidate 1
* fixup! Travis CI: matrix --> jobs
* fixup! Fix indentation error
* fixup! tox.ini: 3.9 --> 3.9-dev
* Fix test failure in Python 3.9RC1.
Co-authored-by: Omer Katz
---
.travis.yml | 21 ++++++++++++++-------
celery/canvas.py | 6 ++++++
tox.ini | 11 ++++++-----
3 files changed, 26 insertions(+), 12 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index fe00bfb458f..96fb6f4d872 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,22 +5,32 @@ python:
- '3.6'
- '3.7'
- '3.8'
+ - '3.9-dev'
os:
- linux
stages:
- test
- integration
- lint
+services:
+ - redis
+ - docker
env:
global:
- PYTHONUNBUFFERED=yes
- CELERY_TOX_PARALLEL=
- matrix:
+ jobs:
- MATRIX_TOXENV=unit
-matrix:
+jobs:
fast_finish: true
+ allow_failures:
+ - python: '3.9-dev'
include:
+ - python: '3.9-dev'
+ env: MATRIX_TOXENV=integration-rabbitmq
+ stage: integration
+
- python: 3.8
env: MATRIX_TOXENV=integration-rabbitmq
stage: integration
@@ -112,14 +122,14 @@ before_install:
wget -qO - https://packages.couchbase.com/ubuntu/couchbase.key | sudo apt-key add -
sudo apt-add-repository -y 'deb http://packages.couchbase.com/ubuntu bionic bionic/main'
sudo apt-get update && sudo apt-get install -y libcouchbase-dev
+install: pip --disable-pip-version-check install --upgrade-strategy eager -U tox | cat
+script: tox $CELERY_TOX_PARALLEL -v -- -v
after_success:
- |
if [[ -v MATRIX_TOXENV || "$TOXENV" =~ "pypy" ]]; then
.tox/$TOXENV/bin/coverage xml
.tox/$TOXENV/bin/codecov -e TOXENV
fi;
-install: pip --disable-pip-version-check install --upgrade-strategy eager -U tox | cat
-script: tox $CELERY_TOX_PARALLEL -v -- -v
notifications:
email: false
irc:
@@ -127,6 +137,3 @@ notifications:
- "chat.freenode.net#celery"
on_success: change
on_failure: change
-services:
- - redis
- - docker
diff --git a/celery/canvas.py b/celery/canvas.py
index cb8e2978d8c..7871f7b395d 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -440,6 +440,12 @@ def __or__(self, other):
return _chain(self, other, app=self._app)
return NotImplemented
+ def __ior__(self, other):
+ # Python 3.9 introduces | as the merge operator for dicts.
+ # We override the in-place version of that operator
+ # so that canvases continue to work as they did before.
+ return self.__or__(other)
+
def election(self):
type = self.type
app = type.app
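``Signature`` is a ``dict`` subclass, so on Python 3.9 it would otherwise
inherit ``dict.__ior__`` and ``sig |= other`` would silently merge mappings
instead of chaining tasks. A small sketch of the behaviour the override
preserves (task names are illustrative):

.. code-block:: python

    from celery import signature

    sig = signature('tasks.add', args=(2, 2))
    sig |= signature('tasks.mul', args=(4,))  # still builds a chain via __or__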
diff --git a/tox.ini b/tox.ini
index d2de6887a13..1b12965923a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
[tox]
envlist =
- {3.6,3.7,3.8,pypy3}-unit
- {3.6,3.7,3.8,pypy3}-integration-{rabbitmq,redis,dynamodb,azureblockblob,cache,cassandra,elasticsearch}
+ {3.6,3.7,3.8,3.9-dev,pypy3}-unit
+ {3.6,3.7,3.8,3.9-dev,pypy3}-integration-{rabbitmq,redis,dynamodb,azureblockblob,cache,cassandra,elasticsearch}
flake8
apicheck
@@ -14,9 +14,9 @@ deps=
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/pkgutils.txt
- 3.6,3.7,3.8: -r{toxinidir}/requirements/test-ci-default.txt
- 3.5,3.6,3.7,3.8: -r{toxinidir}/requirements/docs.txt
- 3.6,3.7,3.8: -r{toxinidir}/requirements/docs.txt
+ 3.6,3.7,3.8,3.9-dev: -r{toxinidir}/requirements/test-ci-default.txt
+ 3.5,3.6,3.7,3.8,3.9-dev: -r{toxinidir}/requirements/docs.txt
+ 3.6,3.7,3.8,3.9-dev: -r{toxinidir}/requirements/docs.txt
pypy3: -r{toxinidir}/requirements/test-ci-base.txt
integration: -r{toxinidir}/requirements/test-integration.txt
@@ -63,6 +63,7 @@ basepython =
3.6: python3.6
3.7: python3.7
3.8: python3.8
+ 3.9-dev: python3.9
pypy3: pypy3
flake8,apicheck,linkcheck,configcheck,bandit: python3.8
flakeplus: python2.7
From d28e340370daafb8b4550555a71089224a2442b3 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 14 Sep 2020 17:33:53 +0300
Subject: [PATCH 017/157] Fix the broken celery upgrade settings command.
---
celery/bin/upgrade.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/bin/upgrade.py b/celery/bin/upgrade.py
index fbad503e1f0..7087005f411 100644
--- a/celery/bin/upgrade.py
+++ b/celery/bin/upgrade.py
@@ -20,7 +20,7 @@ def _slurp(filename):
return [line for line in read_fh]
-def _compat_key(self, key, namespace='CELERY'):
+def _compat_key(key, namespace='CELERY'):
key = key.upper()
if not key.startswith(namespace):
key = '_'.join([namespace, key])
From 9b5f6f5531f45525e904cef114d653a93f1c9635 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 14 Sep 2020 17:41:45 +0300
Subject: [PATCH 018/157] Fix celery migrate settings options.
---
celery/bin/upgrade.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/celery/bin/upgrade.py b/celery/bin/upgrade.py
index 7087005f411..66eb27caaea 100644
--- a/celery/bin/upgrade.py
+++ b/celery/bin/upgrade.py
@@ -52,12 +52,12 @@ def _to_new_key(line, keyfilter=pass1, source=defaults._TO_NEW_KEY):
@upgrade.command(cls=CeleryCommand)
@click.argument('filename')
-@click.option('-django',
+@click.option('--django',
cls=CeleryOption,
is_flag=True,
help_group='Upgrading Options',
help='Upgrade Django project.')
-@click.option('-compat',
+@click.option('--compat',
cls=CeleryOption,
is_flag=True,
help_group='Upgrading Options',
@@ -66,7 +66,7 @@ def _to_new_key(line, keyfilter=pass1, source=defaults._TO_NEW_KEY):
cls=CeleryOption,
is_flag=True,
help_group='Upgrading Options',
- help='Dont backup original files.')
+ help="Don't backup original files.")
def settings(filename, django, compat, no_backup):
"""Migrate settings from Celery 3.x to Celery 4.x."""
lines = _slurp(filename)
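With the corrected flags the command reads as follows (the path is
illustrative):

.. code-block:: console

    $ celery upgrade settings proj/celeryconfig.py --django --no-backup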
From e8b3e84c5f8b98edf6577c0d6e909edd801119b8 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 17 Sep 2020 11:04:23 +0300
Subject: [PATCH 019/157] Remove Riak result backend settings.
---
celery/app/defaults.py | 5 ---
docs/userguide/configuration.rst | 74 --------------------------------
2 files changed, 79 deletions(-)
diff --git a/celery/app/defaults.py b/celery/app/defaults.py
index 9d1b140ea2a..d0fa9d20b54 100644
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -216,11 +216,6 @@ def __repr__(self):
timeout=Option(type='float'),
save_meta_as_text=Option(True, type='bool'),
),
- riak=Namespace(
- __old__=old_ns('celery_riak'),
-
- backend_settings=Option(type='dict'),
- ),
security=Namespace(
__old__=old_ns('celery_security'),
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index cfb413eb156..384be135b42 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -1583,80 +1583,6 @@ Default: :const:`True`
Should meta be saved as text or as native json.
Result is always serialized as text.
-.. _conf-riak-result-backend:
-
-Riak backend settings
----------------------
-
-.. note::
-
- The Riak backend requires the :pypi:`riak` library.
-
- To install the this package use :command:`pip`:
-
- .. code-block:: console
-
- $ pip install celery[riak]
-
- See :ref:`bundles` for information on combining multiple extension
- requirements.
-
-This backend requires the :setting:`result_backend`
-setting to be set to a Riak URL::
-
- result_backend = 'riak://host:port/bucket'
-
-For example::
-
- result_backend = 'riak://localhost/celery
-
-is the same as::
-
- result_backend = 'riak://'
-
-The fields of the URL are defined as follows:
-
-#. ``host``
-
- Host name or IP address of the Riak server (e.g., `'localhost'`).
-
-#. ``port``
-
- Port to the Riak server using the protobuf protocol. Default is 8087.
-
-#. ``bucket``
-
- Bucket name to use. Default is `celery`.
- The bucket needs to be a string with ASCII characters only.
-
-Alternatively, this backend can be configured with the following configuration directives.
-
-.. setting:: riak_backend_settings
-
-``riak_backend_settings``
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Default: ``{}`` (empty mapping).
-
-This is a dict supporting the following keys:
-
-* ``host``
-
- The host name of the Riak server. Defaults to ``"localhost"``.
-
-* ``port``
-
- The port the Riak server is listening to. Defaults to 8087.
-
-* ``bucket``
-
- The bucket name to connect to. Defaults to "celery".
-
-* ``protocol``
-
- The protocol to use to connect to the Riak server. This isn't configurable
- via :setting:`result_backend`
-
.. _conf-dynamodb-result-backend:
AWS DynamoDB backend settings
From 21d0499cf2213498a0d8024f329b0fc128257408 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 17 Sep 2020 12:12:13 +0300
Subject: [PATCH 020/157] Rephrase to mention that 3.5 is also EOL.
---
docs/whatsnew-5.0.rst | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 267cd3fb050..11e5530935b 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -120,8 +120,7 @@ Python 2.7 has reached EOL in January 2020.
In order to focus our efforts we have dropped support for Python 2.7 in
this version.
-In addition Python 3.5 does not provide us with the features we need to move
-forward towards Celery 6.x.
+In addition, Python 3.5 has reached EOL in September 2020.
Therefore, we are also dropping support for Python 3.5.
If you still require to run Celery using Python 2.7 or Python 3.5
From 14a3524253f7769118fcb23de12b6707f38a1701 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 17 Sep 2020 12:19:46 +0300
Subject: [PATCH 021/157] Add a note about the removal of the Riak result
backend.
---
docs/whatsnew-5.0.rst | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 11e5530935b..e2373e21c59 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -163,6 +163,21 @@ for the database client is couchbase 3.0.0.
To verify that your Couchbase Server is compatible with the V3 SDK,
please refer to their `documentation `_.
+Riak Result Backend
+-------------------
+
+The Riak result backend has been removed as the database is no longer maintained.
+
+The Riak Python client is unmaintained and only supports Python 3.6 and
+below, which prevents us from continuing to support it.
+
+If you are still using Riak, refrain from upgrading to Celery 5.0 while you
+migrate your application to a different database.
+
+We apologize for the lack of notice in advance, but we feel that the chance
+you'll be affected by this breaking change is minimal, which is why we
+did it.
+
.. _v500-news:
News
From f6b3e13f5cb69eb4fef5ff202b54f8e8a51737b1 Mon Sep 17 00:00:00 2001
From: Weiliang Li
Date: Sun, 20 Sep 2020 03:40:39 +0900
Subject: [PATCH 022/157] Fix examples of starting a worker in comments (#6331)
---
celery/bin/worker.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index da35f665728..4d4c57aea16 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -273,10 +273,10 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
Examples
--------
- $ celery worker --app=proj -l info
- $ celery worker -A proj -l info -Q hipri,lopri
- $ celery worker -A proj --concurrency=4
- $ celery worker -A proj --concurrency=1000 -P eventlet
+ $ celery --app=proj worker -l INFO
+ $ celery -A proj worker -l INFO -Q hipri,lopri
+ $ celery -A proj worker --concurrency=4
+ $ celery -A proj worker --concurrency=1000 -P eventlet
$ celery worker --autoscale=10,0
"""
From 5a0c45857640f2415567736ad7ad2b7ae69e1304 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 21 Sep 2020 11:47:07 +0300
Subject: [PATCH 023/157] Remove deprecated function from app.log.Logging.
---
celery/app/log.py | 5 -----
t/unit/app/test_log.py | 6 +++++-
2 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/celery/app/log.py b/celery/app/log.py
index 2a845a73b5f..e96f1174221 100644
--- a/celery/app/log.py
+++ b/celery/app/log.py
@@ -233,11 +233,6 @@ def _is_configured(self, logger):
return self._has_handler(logger) and not getattr(
logger, '_rudimentary_setup', False)
- def setup_logger(self, name='celery', *args, **kwargs):
- """Deprecated: No longer used."""
- self.setup_logging_subsystem(*args, **kwargs)
- return logging.root
-
def get_default_logger(self, name='celery', **kwargs):
return get_logger(name)
diff --git a/t/unit/app/test_log.py b/t/unit/app/test_log.py
index fa780cce80a..453c3f26702 100644
--- a/t/unit/app/test_log.py
+++ b/t/unit/app/test_log.py
@@ -149,8 +149,12 @@ def getMessage(self):
class test_default_logger:
+ def setup_logger(self, *args, **kwargs):
+ self.app.log.setup_logging_subsystem(*args, **kwargs)
+
+ return logging.root
+
def setup(self):
- self.setup_logger = self.app.log.setup_logger
self.get_logger = lambda n=None: get_logger(n) if n else logging.root
signals.setup_logging.receivers[:] = []
self.app.log.already_setup = False
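Code that called the removed helper can invoke the logging subsystem setup
directly; a minimal sketch:

.. code-block:: python

    import logging

    app.log.setup_logging_subsystem(loglevel=logging.INFO)
    logger = logging.getLogger('celery')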
From a09439e7501444687e9b935461fd77430b552668 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 21 Sep 2020 15:09:14 +0300
Subject: [PATCH 024/157] Migration guide.
---
docs/whatsnew-5.0.rst | 35 ++++++++++++++++++++++++++++++++++-
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index e2373e21c59..00e78674019 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -94,7 +94,40 @@ Wall of Contributors
Upgrading from Celery 4.x
=========================
-Please read the important notes below as there are several breaking changes.
+Step 1: Adjust your command line invocation
+-------------------------------------------
+
+Celery 5.0 introduces a new CLI implementation which isn't completely backwards compatible.
+
+The global options can no longer be positioned after the sub-command.
+Instead, they must be positioned as an option for the `celery` command like so::
+
+ celery --app path.to.app worker
+
+If you were using our :ref:`daemonizing` guide to deploy Celery in production,
+you should revisit it for updates.
+
+Step 2: Update your configuration with the new setting names
+------------------------------------------------------------
+
+If you haven't already updated your configuration when you migrated to Celery 4.0,
+please do so now.
+
+We elected to extend the deprecation period until 6.0 since
+we did not loudly warn about using these deprecated settings.
+
+Please refer to the :ref:`migration guide ` for instructions.
+
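For example, the old uppercase names map onto the new lowercase ones (two
common settings shown):

.. code-block:: python

    # celeryconfig.py -- 3.x style (deprecated)
    BROKER_URL = 'amqp://'
    CELERY_RESULT_BACKEND = 'rpc://'

    # celeryconfig.py -- 4.x/5.x style
    broker_url = 'amqp://'
    result_backend = 'rpc://'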
+Step 3: Read the important notes in this document
+-------------------------------------------------
+
+Make sure you are not affected by any of the important upgrade notes
+mentioned in the :ref:`following section <v500-important>`.
+
+Step 4: Upgrade to Celery 5.0
+-----------------------------
+
+At this point you can upgrade your workers and clients with the new version.
.. _v500-important:
From 782bac07ab711e0965f457ef4a379944848def70 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 21 Sep 2020 15:18:47 +0300
Subject: [PATCH 025/157] Document breaking changes for the CLI in the Whats
New document.
---
docs/whatsnew-5.0.rst | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 00e78674019..72a60bc2eb5 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -124,6 +124,9 @@ Step 3: Read the important notes in this document
Make sure you are not affected by any of the important upgrade notes
mentioned in the :ref:`following section <v500-important>`.
+You should mainly verify that none of the breaking changes in the CLI
+affect you. Please refer to :ref:`New Command Line Interface` for details.
+
Step 4: Upgrade to Celery 5.0
-----------------------------
@@ -211,6 +214,18 @@ We apologize for the lack of notice in advance, but we feel that the chance
you'll be affected by this breaking change is minimal, which is why we
did it.
+New Command Line Interface
+--------------------------
+
+The command line interface has been revamped using Click.
+As a result, a few breaking changes have been introduced:
+
+- Postfix global options like `celery worker --app path.to.app` or `celery worker --workdir /path/to/workdir` are no longer supported.
+ You should specify them as part of the global options of the main celery command.
+- :program:`celery amqp` and :program:`celery shell` require the `repl`
+ sub command to start a shell. You can now invoke specific shell commands
+ without a shell. Type `celery amqp --help` or `celery shell --help` for details.
+
.. _v500-news:
News
From a211e8e290106d17999ab490948969a5169a220c Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 21 Sep 2020 15:27:41 +0300
Subject: [PATCH 026/157] Add a "port code to Python 3" migration step.
---
docs/whatsnew-5.0.rst | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 72a60bc2eb5..5c4f1f70958 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -127,7 +127,22 @@ mentioned in the :ref:`following section <v500-important>`.
You should mainly verify that none of the breaking changes in the CLI
affect you. Please refer to :ref:`New Command Line Interface` for details.
-Step 4: Upgrade to Celery 5.0
+Step 4: Migrate your code to Python 3
+-------------------------------------
+
+Celery 5.0 supports only Python 3. Therefore, you must ensure your code is
+compatible with Python 3.
+
+If you haven't ported your code to Python 3, you must do so before upgrading.
+
+You can use tools like `2to3 `_
+and `pyupgrade `_ to assist you with
+this effort.
+
+After the migration is done, run your test suite with Celery 4 to ensure
+nothing has been broken.
+
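For instance, pyupgrade can rewrite an already-Python-3-compatible tree in
place (a sketch; adjust the file selection to your project):

.. code-block:: console

    $ pip install pyupgrade
    $ pyupgrade --py36-plus $(git ls-files '*.py')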
+Step 5: Upgrade to Celery 5.0
-----------------------------
At this point you can upgrade your workers and clients with the new version.
From 937844c048d9f8245dc6f0c3c349cf9d7d903cda Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 21 Sep 2020 17:13:46 +0300
Subject: [PATCH 027/157] Update supported Python versions in the introduction
document.
---
docs/getting-started/introduction.rst | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/docs/getting-started/introduction.rst b/docs/getting-started/introduction.rst
index dc6b862b4f5..ea2162467ae 100644
--- a/docs/getting-started/introduction.rst
+++ b/docs/getting-started/introduction.rst
@@ -39,17 +39,18 @@ What do I need?
===============
.. sidebar:: Version Requirements
- :subtitle: Celery version 4.0 runs on
+ :subtitle: Celery version 5.0 runs on
- - Python ❨2.7, 3.4, 3.5❩
- - PyPy ❨5.4, 5.5❩
+ - Python ❨3.6, 3.7, 3.8❩
+ - PyPy3.6 ❨7.3❩
- This is the last version to support Python 2.7,
- and from the next version (Celery 5.x) Python 3.5 or newer is required.
+ Celery 4.x was the last version to support Python 2.7;
+ Celery 5.x requires Python 3.6 or newer.
If you're running an older version of Python, you need to be running
an older version of Celery:
+ - Python 2.7 or Python 3.5: Celery series 4.4 or earlier.
- Python 2.6: Celery series 3.1 or earlier.
- Python 2.5: Celery series 3.0 or earlier.
- Python 2.4 was Celery series 2.2 or earlier.
From d505db6b3c68e20778e044e49a1627808a2e11c5 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 23 Sep 2020 13:34:30 +0300
Subject: [PATCH 028/157] Update bash completion.
---
extra/bash-completion/celery.bash | 142 ++++--------------------------
1 file changed, 16 insertions(+), 126 deletions(-)
diff --git a/extra/bash-completion/celery.bash b/extra/bash-completion/celery.bash
index 2595557138e..f3603f5a237 100644
--- a/extra/bash-completion/celery.bash
+++ b/extra/bash-completion/celery.bash
@@ -1,131 +1,21 @@
-# This is a bash completion script for celery
-# Redirect it to a file, then source it or copy it to /etc/bash_completion.d
-# to get tab completion. celery must be on your PATH for this to work.
-_celery()
-{
- local cur basep opts base kval kkey loglevels prevp in_opt controlargs
- local pools
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prevp="${COMP_WORDS[COMP_CWORD-1]}"
- basep="${COMP_WORDS[1]}"
- opts="worker events beat shell multi amqp status
- inspect control purge list migrate call result
- report upgrade flower graph logtool help"
- fargs="--app= --broker= --loader= --config= --version"
- dopts="--detach --umask= --gid= --uid= --pidfile=
- --logfile= --loglevel= --executable="
- controlargs="--timeout --destination"
- pools="prefork eventlet gevent solo"
- loglevels="critical error warning info debug"
- in_opt=0
-
- # find the current sub-command, store in basep'
- for index in $(seq 1 $((${#COMP_WORDS[@]} - 2)))
- do
- basep=${COMP_WORDS[$index]}
- if [ "${basep:0:2}" != "--" ]; then
- break;
- fi
- done
-
- if [ "${cur:0:2}" == "--" -a "$cur" != "${cur//=}" ]; then
- in_opt=1
- kkey="${cur%=*}"
- kval="${cur#*=}"
- elif [ "${prevp:0:1}" == "-" ]; then
- in_opt=1
- kkey="$prevp"
- kval="$cur"
- fi
+_celery_completion() {
+ local IFS=$'
+'
+ COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \
+ COMP_CWORD=$COMP_CWORD \
+ _CELERY_COMPLETE=complete $1 ) )
+ return 0
+}
- if [ $in_opt -eq 1 ]; then
- case "${kkey}" in
- --uid|-u)
- COMPREPLY=( $(compgen -u -- "$kval") )
- return 0
- ;;
- --gid|-g)
- COMPREPLY=( $(compgen -g -- "$kval") )
- return 0
- ;;
- --pidfile|--logfile|-p|-f|--statedb|-S|-s|--schedule-filename)
- COMPREPLY=( $(compgen -f -- "$kval") )
- return 0
- ;;
- --workdir)
- COMPREPLY=( $(compgen -d -- "$kval") )
- return 0
- ;;
- --loglevel|-l)
- COMPREPLY=( $(compgen -W "$loglevels" -- "$kval") )
- return 0
- ;;
- --pool|-P)
- COMPREPLY=( $(compgen -W "$pools" -- "$kval") )
- return 0
- ;;
- *)
- ;;
- esac
+_celery_completionetup() {
+ local COMPLETION_OPTIONS=""
+ local BASH_VERSION_ARR=(${BASH_VERSION//./ })
+ # Only BASH version 4.4 and later have the nosort option.
+ if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then
+ COMPLETION_OPTIONS="-o nosort"
fi
- case "${basep}" in
- worker)
- COMPREPLY=( $(compgen -W '--concurrency= --pool= --purge --logfile=
- --loglevel= --hostname= --beat --schedule= --scheduler= --statedb= --events
- --time-limit= --soft-time-limit= --max-tasks-per-child= --queues=
- --include= --pidfile= --autoscale $fargs' -- ${cur} ) )
- return 0
- ;;
- inspect)
- COMPREPLY=( $(compgen -W 'active active_queues ping registered report
- reserved revoked scheduled stats --help $controlargs $fargs' -- ${cur}) )
- return 0
- ;;
- control)
- COMPREPLY=( $(compgen -W 'add_consumer autoscale cancel_consumer
- disable_events enable_events pool_grow pool_shrink
- rate_limit time_limit --help $controlargs $fargs' -- ${cur}) )
- return 0
- ;;
- multi)
- COMPREPLY=( $(compgen -W 'start restart stopwait stop show
- kill names expand get help --quiet --nosplash
- --verbose --no-color --help $fargs' -- ${cur} ) )
- return 0
- ;;
- amqp)
- COMPREPLY=( $(compgen -W 'queue.declare queue.purge exchange.delete
- basic.publish exchange.declare queue.delete queue.bind
- basic.get --help $fargs' -- ${cur} ))
- return 0
- ;;
- list)
- COMPREPLY=( $(compgen -W 'bindings $fargs' -- ${cur} ) )
- return 0
- ;;
- shell)
- COMPREPLY=( $(compgen -W '--ipython --bpython --python
- --without-tasks --eventlet --gevent $fargs' -- ${cur} ) )
- return 0
- ;;
- beat)
- COMPREPLY=( $(compgen -W '--schedule= --scheduler=
- --max-interval= $dopts $fargs' -- ${cur} ))
- return 0
- ;;
- events)
- COMPREPLY=( $(compgen -W '--dump --camera= --freq=
- --maxrate= $dopts $fargs' -- ${cur}))
- return 0
- ;;
- *)
- ;;
- esac
-
- COMPREPLY=($(compgen -W "${opts} ${fargs}" -- ${cur}))
- return 0
+ complete $COMPLETION_OPTIONS -F _celery_completion celery
}
-complete -F _celery celery
+_celery_completionetup;
From 28231cb7ef9272de11a5affb0db73fb9da57bfda Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 23 Sep 2020 15:18:21 +0300
Subject: [PATCH 029/157] Add note about new shell completion.
---
docs/whatsnew-5.0.rst | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 5c4f1f70958..d30f60ba34b 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -125,7 +125,7 @@ Make sure you are not affected by any of the important upgrade notes
mentioned in the :ref:`following section <v500-important>`.
You should mainly verify that none of the breaking changes in the CLI
-affect you. Please refer to :ref:`New Command Line Interface` for details.
+affect you. Please refer to :ref:`New Command Line Interface <new_command_line_interface>` for details.
Step 4: Migrate your code to Python 3
-------------------------------------
@@ -229,6 +229,8 @@ We apologize for the lack of notice in advance, but we feel that the chance
you'll be affected by this breaking change is minimal, which is why we
did it.
+.. _new_command_line_interface:
+
New Command Line Interface
--------------------------
@@ -241,6 +243,13 @@ As a result, a few breaking changes have been introduced:
sub command to start a shell. You can now invoke specific shell commands
without a shell. Type `celery amqp --help` or `celery shell --help` for details.
+Click provides shell completion `out of the box `_.
+This functionality replaces our previous bash completion script and adds
+completion support for the zsh and fish shells.
+
+The bash completion script was exported to `extras/celery.bash `_
+for the packager's convenience.
+
.. _v500-news:
News
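With Click 7.x conventions, completion can be enabled for the current bash
session or used to regenerate the packaged script (a sketch; the environment
variable form is Click's, not Celery-specific):

.. code-block:: console

    $ eval "$(_CELERY_COMPLETE=source celery)"
    $ _CELERY_COMPLETE=source celery > extra/bash-completion/celery.bash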
From 802ead0379767c1032e441b6c6275db263939963 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 23 Sep 2020 15:21:32 +0300
Subject: [PATCH 030/157] Update daemonization docs.
---
docs/userguide/daemonizing.rst | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/docs/userguide/daemonizing.rst b/docs/userguide/daemonizing.rst
index 1f69159dfcb..07e39009c97 100644
--- a/docs/userguide/daemonizing.rst
+++ b/docs/userguide/daemonizing.rst
@@ -72,13 +72,11 @@ the worker you must also export them (e.g., :command:`export DISPLAY=":0"`)
.. code-block:: console
- $ celery multi start worker1 \
- -A proj \
+ $ celery -A proj multi start worker1 \
--pidfile="$HOME/run/celery/%n.pid" \
--logfile="$HOME/log/celery/%n%I.log"
- $ celery multi restart worker1 \
- -A proj \
+ $ celery -A proj multi restart worker1 \
--logfile="$HOME/log/celery/%n%I.log" \
--pidfile="$HOME/run/celery/%n.pid"
@@ -401,13 +399,13 @@ This is an example systemd file:
Group=celery
EnvironmentFile=/etc/conf.d/celery
WorkingDirectory=/opt/celery
- ExecStart=/bin/sh -c '${CELERY_BIN} multi start ${CELERYD_NODES} \
- -A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \
+ ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi start ${CELERYD_NODES} \
+ --pidfile=${CELERYD_PID_FILE} \
--logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE}'
- ExecReload=/bin/sh -c '${CELERY_BIN} multi restart ${CELERYD_NODES} \
- -A ${CELERY_APP} --pidfile=${CELERYD_PID_FILE} \
+ ExecReload=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi restart ${CELERYD_NODES} \
+ --pidfile=${CELERYD_PID_FILE} \
--logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
[Install]
@@ -494,8 +492,8 @@ This is an example systemd file for Celery Beat:
Group=celery
EnvironmentFile=/etc/conf.d/celery
WorkingDirectory=/opt/celery
- ExecStart=/bin/sh -c '${CELERY_BIN} beat \
- -A ${CELERY_APP} --pidfile=${CELERYBEAT_PID_FILE} \
+ ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
+ --pidfile=${CELERYBEAT_PID_FILE} \
--logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
[Install]
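The rule this patch applies throughout the daemonization examples is that in Celery 5 the -A/--app option is global and must precede the sub-command. An illustrative pair of invocations (not part of the patch):

    # Celery 4 style, no longer supported:
    $ celery multi start worker1 -A proj

    # Celery 5 style, global options first:
    $ celery -A proj multi start worker1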
From 5a919c06793b394c4ea0d65b14bb9ae167a920c8 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 23 Sep 2020 18:00:47 +0300
Subject: [PATCH 031/157] Remove amqp backend. (#6360)
Fixes #6356.
---
celery/backends/amqp.py | 322 ------------------
.../reference/celery.backends.amqp.rst | 11 -
docs/internals/reference/index.rst | 1 -
docs/whatsnew-5.0.rst | 5 +
t/unit/app/test_backends.py | 2 -
t/unit/backends/test_amqp.py | 305 -----------------
6 files changed, 5 insertions(+), 641 deletions(-)
delete mode 100644 celery/backends/amqp.py
delete mode 100644 docs/internals/reference/celery.backends.amqp.rst
delete mode 100644 t/unit/backends/test_amqp.py
diff --git a/celery/backends/amqp.py b/celery/backends/amqp.py
deleted file mode 100644
index 6695aff277a..00000000000
--- a/celery/backends/amqp.py
+++ /dev/null
@@ -1,322 +0,0 @@
-"""The old AMQP result backend, deprecated and replaced by the RPC backend."""
-import socket
-import time
-from collections import deque
-from operator import itemgetter
-
-from kombu import Consumer, Exchange, Producer, Queue
-
-from celery import states
-from celery.exceptions import TimeoutError
-from celery.utils import deprecated
-from celery.utils.log import get_logger
-
-from .base import BaseBackend
-
-__all__ = ('BacklogLimitExceeded', 'AMQPBackend')
-
-logger = get_logger(__name__)
-
-
-class BacklogLimitExceeded(Exception):
- """Too much state history to fast-forward."""
-
-
-def repair_uuid(s):
- # Historically the dashes in UUIDS are removed from AMQ entity names,
- # but there's no known reason to. Hopefully we'll be able to fix
- # this in v4.0.
- return '{}-{}-{}-{}-{}'.format(s[:8], s[8:12], s[12:16], s[16:20], s[20:])
-
-
-class NoCacheQueue(Queue):
- can_cache_declaration = False
-
-
-class AMQPBackend(BaseBackend):
- """The AMQP result backend.
-
- Deprecated: Please use the RPC backend or a persistent backend.
- """
-
- Exchange = Exchange
- Queue = NoCacheQueue
- Consumer = Consumer
- Producer = Producer
-
- BacklogLimitExceeded = BacklogLimitExceeded
-
- persistent = True
- supports_autoexpire = True
- supports_native_join = True
-
- retry_policy = {
- 'max_retries': 20,
- 'interval_start': 0,
- 'interval_step': 1,
- 'interval_max': 1,
- }
-
- def __init__(self, app, connection=None, exchange=None, exchange_type=None,
- persistent=None, serializer=None, auto_delete=True, **kwargs):
- deprecated.warn(
- 'The AMQP result backend', deprecation='4.0', removal='5.0',
- alternative='Please use RPC backend or a persistent backend.')
- super().__init__(app, **kwargs)
- conf = self.app.conf
- self._connection = connection
- self.persistent = self.prepare_persistent(persistent)
- self.delivery_mode = 2 if self.persistent else 1
- exchange = exchange or conf.result_exchange
- exchange_type = exchange_type or conf.result_exchange_type
- self.exchange = self._create_exchange(
- exchange, exchange_type, self.delivery_mode,
- )
- self.serializer = serializer or conf.result_serializer
- self.auto_delete = auto_delete
-
- def _create_exchange(self, name, type='direct', delivery_mode=2):
- return self.Exchange(name=name,
- type=type,
- delivery_mode=delivery_mode,
- durable=self.persistent,
- auto_delete=False)
-
- def _create_binding(self, task_id):
- name = self.rkey(task_id)
- return self.Queue(
- name=name,
- exchange=self.exchange,
- routing_key=name,
- durable=self.persistent,
- auto_delete=self.auto_delete,
- expires=self.expires,
- )
-
- def revive(self, channel):
- pass
-
- def rkey(self, task_id):
- return task_id.replace('-', '')
-
- def destination_for(self, task_id, request):
- if request:
- return self.rkey(task_id), request.correlation_id or task_id
- return self.rkey(task_id), task_id
-
- def store_result(self, task_id, result, state,
- traceback=None, request=None, **kwargs):
- """Send task return value and state."""
- routing_key, correlation_id = self.destination_for(task_id, request)
- if not routing_key:
- return
-
- payload = {'task_id': task_id, 'status': state,
- 'result': self.encode_result(result, state),
- 'traceback': traceback,
- 'children': self.current_task_children(request)}
- if self.app.conf.find_value_for_key('extended', 'result'):
- payload['name'] = getattr(request, 'task_name', None)
- payload['args'] = getattr(request, 'args', None)
- payload['kwargs'] = getattr(request, 'kwargs', None)
- payload['worker'] = getattr(request, 'hostname', None)
- payload['retries'] = getattr(request, 'retries', None)
- payload['queue'] = request.delivery_info.get('routing_key')\
- if hasattr(request, 'delivery_info') \
- and request.delivery_info else None
-
- with self.app.amqp.producer_pool.acquire(block=True) as producer:
- producer.publish(
- payload,
- exchange=self.exchange,
- routing_key=routing_key,
- correlation_id=correlation_id,
- serializer=self.serializer,
- retry=True, retry_policy=self.retry_policy,
- declare=self.on_reply_declare(task_id),
- delivery_mode=self.delivery_mode,
- )
-
- def on_reply_declare(self, task_id):
- return [self._create_binding(task_id)]
-
- def wait_for(self, task_id, timeout=None, cache=True,
- no_ack=True, on_interval=None,
- READY_STATES=states.READY_STATES,
- PROPAGATE_STATES=states.PROPAGATE_STATES,
- **kwargs):
- cached_meta = self._cache.get(task_id)
- if cache and cached_meta and \
- cached_meta['status'] in READY_STATES:
- return cached_meta
- try:
- return self.consume(task_id, timeout=timeout, no_ack=no_ack,
- on_interval=on_interval)
- except socket.timeout:
- raise TimeoutError('The operation timed out.')
-
- def get_task_meta(self, task_id, backlog_limit=1000):
- # Polling and using basic_get
- with self.app.pool.acquire_channel(block=True) as (_, channel):
- binding = self._create_binding(task_id)(channel)
- binding.declare()
-
- prev = latest = acc = None
- for i in range(backlog_limit): # spool ffwd
- acc = binding.get(
- accept=self.accept, no_ack=False,
- )
- if not acc: # no more messages
- break
- if acc.payload['task_id'] == task_id:
- prev, latest = latest, acc
- if prev:
- # backends are not expected to keep history,
- # so we delete everything except the most recent state.
- prev.ack()
- prev = None
- else:
- raise self.BacklogLimitExceeded(task_id)
-
- if latest:
- payload = self._cache[task_id] = self.meta_from_decoded(
- latest.payload)
- latest.requeue()
- return payload
- else:
- # no new state, use previous
- try:
- return self._cache[task_id]
- except KeyError:
- # result probably pending.
- return {'status': states.PENDING, 'result': None}
- poll = get_task_meta # XXX compat
-
- def drain_events(self, connection, consumer,
- timeout=None, on_interval=None, now=time.monotonic, wait=None):
- wait = wait or connection.drain_events
- results = {}
-
- def callback(meta, message):
- if meta['status'] in states.READY_STATES:
- results[meta['task_id']] = self.meta_from_decoded(meta)
-
- consumer.callbacks[:] = [callback]
- time_start = now()
-
- while 1:
- # Total time spent may exceed a single call to wait()
- if timeout and now() - time_start >= timeout:
- raise socket.timeout()
- try:
- wait(timeout=1)
- except socket.timeout:
- pass
- if on_interval:
- on_interval()
- if results: # got event on the wanted channel.
- break
- self._cache.update(results)
- return results
-
- def consume(self, task_id, timeout=None, no_ack=True, on_interval=None):
- wait = self.drain_events
- with self.app.pool.acquire_channel(block=True) as (conn, channel):
- binding = self._create_binding(task_id)
- with self.Consumer(channel, binding,
- no_ack=no_ack, accept=self.accept) as consumer:
- while 1:
- try:
- return wait(
- conn, consumer, timeout, on_interval)[task_id]
- except KeyError:
- continue
-
- def _many_bindings(self, ids):
- return [self._create_binding(task_id) for task_id in ids]
-
- def get_many(self, task_ids, timeout=None, no_ack=True,
- on_message=None, on_interval=None,
- now=time.monotonic, getfields=itemgetter('status', 'task_id'),
- READY_STATES=states.READY_STATES,
- PROPAGATE_STATES=states.PROPAGATE_STATES, **kwargs):
- with self.app.pool.acquire_channel(block=True) as (conn, channel):
- ids = set(task_ids)
- cached_ids = set()
- mark_cached = cached_ids.add
- for task_id in ids:
- try:
- cached = self._cache[task_id]
- except KeyError:
- pass
- else:
- if cached['status'] in READY_STATES:
- yield task_id, cached
- mark_cached(task_id)
- ids.difference_update(cached_ids)
- results = deque()
- push_result = results.append
- push_cache = self._cache.__setitem__
- decode_result = self.meta_from_decoded
-
- def _on_message(message):
- body = decode_result(message.decode())
- if on_message is not None:
- on_message(body)
- state, uid = getfields(body)
- if state in READY_STATES:
- push_result(body) \
- if uid in task_ids else push_cache(uid, body)
-
- bindings = self._many_bindings(task_ids)
- with self.Consumer(channel, bindings, on_message=_on_message,
- accept=self.accept, no_ack=no_ack):
- wait = conn.drain_events
- popleft = results.popleft
- while ids:
- wait(timeout=timeout)
- while results:
- state = popleft()
- task_id = state['task_id']
- ids.discard(task_id)
- push_cache(task_id, state)
- yield task_id, state
- if on_interval:
- on_interval()
-
- def reload_task_result(self, task_id):
- raise NotImplementedError(
- 'reload_task_result is not supported by this backend.')
-
- def reload_group_result(self, task_id):
- """Reload group result, even if it has been previously fetched."""
- raise NotImplementedError(
- 'reload_group_result is not supported by this backend.')
-
- def save_group(self, group_id, result):
- raise NotImplementedError(
- 'save_group is not supported by this backend.')
-
- def restore_group(self, group_id, cache=True):
- raise NotImplementedError(
- 'restore_group is not supported by this backend.')
-
- def delete_group(self, group_id):
- raise NotImplementedError(
- 'delete_group is not supported by this backend.')
-
- def __reduce__(self, args=(), kwargs=None):
- kwargs = kwargs if kwargs else {}
- kwargs.update(
- connection=self._connection,
- exchange=self.exchange.name,
- exchange_type=self.exchange.type,
- persistent=self.persistent,
- serializer=self.serializer,
- auto_delete=self.auto_delete,
- expires=self.expires,
- )
- return super().__reduce__(args, kwargs)
-
- def as_uri(self, include_password=True):
- return 'amqp://'
diff --git a/docs/internals/reference/celery.backends.amqp.rst b/docs/internals/reference/celery.backends.amqp.rst
deleted file mode 100644
index 61c99429fda..00000000000
--- a/docs/internals/reference/celery.backends.amqp.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-=======================================
- ``celery.backends.amqp``
-=======================================
-
-.. contents::
- :local:
-.. currentmodule:: celery.backends.amqp
-
-.. automodule:: celery.backends.amqp
- :members:
- :undoc-members:
diff --git a/docs/internals/reference/index.rst b/docs/internals/reference/index.rst
index 87d07618928..cd587b8ae76 100644
--- a/docs/internals/reference/index.rst
+++ b/docs/internals/reference/index.rst
@@ -27,7 +27,6 @@
celery.backends.azureblockblob
celery.backends.rpc
celery.backends.database
- celery.backends.amqp
celery.backends.cache
celery.backends.consul
celery.backends.couchdb
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index d30f60ba34b..b062a275060 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -229,6 +229,11 @@ We apologize for the lack of notice in advance but we feel that the chance
you'll be affected by this breaking change is minimal which is why we
did it.
+AMQP Result Backend
+-------------------
+
+The AMQP result backend has been removed as it was deprecated in version 4.0.
+
.. _new_command_line_interface:
New Command Line Interface
diff --git a/t/unit/app/test_backends.py b/t/unit/app/test_backends.py
index 4dd54f99ead..a87f9665053 100644
--- a/t/unit/app/test_backends.py
+++ b/t/unit/app/test_backends.py
@@ -3,7 +3,6 @@
import pytest
from celery.app import backends
-from celery.backends.amqp import AMQPBackend
from celery.backends.cache import CacheBackend
from celery.exceptions import ImproperlyConfigured
@@ -11,7 +10,6 @@
class test_backends:
@pytest.mark.parametrize('url,expect_cls', [
- ('amqp://', AMQPBackend),
('cache+memory://', CacheBackend),
])
def test_get_backend_aliases(self, url, expect_cls, app):
diff --git a/t/unit/backends/test_amqp.py b/t/unit/backends/test_amqp.py
deleted file mode 100644
index 09f4d49519d..00000000000
--- a/t/unit/backends/test_amqp.py
+++ /dev/null
@@ -1,305 +0,0 @@
-import pickle
-from contextlib import contextmanager
-from datetime import timedelta
-from pickle import dumps, loads
-from queue import Empty, Queue
-from unittest.mock import Mock
-
-import pytest
-from billiard.einfo import ExceptionInfo
-from case import mock
-
-from celery import states, uuid
-from celery.app.task import Context
-from celery.backends.amqp import AMQPBackend
-from celery.result import AsyncResult
-
-
-class SomeClass:
-
- def __init__(self, data):
- self.data = data
-
-
-class test_AMQPBackend:
-
- def setup(self):
- self.app.conf.result_cache_max = 100
-
- def create_backend(self, **opts):
- opts = dict({'serializer': 'pickle', 'persistent': True}, **opts)
- return AMQPBackend(self.app, **opts)
-
- def test_destination_for(self):
- b = self.create_backend()
- request = Mock()
- assert b.destination_for('id', request) == (
- b.rkey('id'), request.correlation_id,
- )
-
- def test_store_result__no_routing_key(self):
- b = self.create_backend()
- b.destination_for = Mock()
- b.destination_for.return_value = None, None
- b.store_result('id', None, states.SUCCESS)
-
- def test_mark_as_done(self):
- tb1 = self.create_backend(max_cached_results=1)
- tb2 = self.create_backend(max_cached_results=1)
-
- tid = uuid()
-
- tb1.mark_as_done(tid, 42)
- assert tb2.get_state(tid) == states.SUCCESS
- assert tb2.get_result(tid) == 42
- assert tb2._cache.get(tid)
- assert tb2.get_result(tid), 42
-
- @pytest.mark.usefixtures('depends_on_current_app')
- def test_pickleable(self):
- assert loads(dumps(self.create_backend()))
-
- def test_revive(self):
- tb = self.create_backend()
- tb.revive(None)
-
- def test_is_pickled(self):
- tb1 = self.create_backend()
- tb2 = self.create_backend()
-
- tid2 = uuid()
- result = {'foo': 'baz', 'bar': SomeClass(12345)}
- tb1.mark_as_done(tid2, result)
- # is serialized properly.
- rindb = tb2.get_result(tid2)
- assert rindb.get('foo') == 'baz'
- assert rindb.get('bar').data == 12345
-
- def test_mark_as_failure(self):
- tb1 = self.create_backend()
- tb2 = self.create_backend()
-
- tid3 = uuid()
- try:
- raise KeyError('foo')
- except KeyError as exception:
- einfo = ExceptionInfo()
- tb1.mark_as_failure(tid3, exception, traceback=einfo.traceback)
- assert tb2.get_state(tid3) == states.FAILURE
- assert isinstance(tb2.get_result(tid3), KeyError)
- assert tb2.get_traceback(tid3) == einfo.traceback
-
- def test_repair_uuid(self):
- from celery.backends.amqp import repair_uuid
- for i in range(10):
- tid = uuid()
- assert repair_uuid(tid.replace('-', '')) == tid
-
- def test_expires_is_int(self):
- b = self.create_backend(expires=48)
- q = b._create_binding('x1y2z3')
- assert q.expires == 48
-
- def test_expires_is_float(self):
- b = self.create_backend(expires=48.3)
- q = b._create_binding('x1y2z3')
- assert q.expires == 48.3
-
- def test_expires_is_timedelta(self):
- b = self.create_backend(expires=timedelta(minutes=1))
- q = b._create_binding('x1y2z3')
- assert q.expires == 60
-
- @mock.sleepdeprived()
- def test_store_result_retries(self):
- iterations = [0]
- stop_raising_at = [5]
-
- def publish(*args, **kwargs):
- if iterations[0] > stop_raising_at[0]:
- return
- iterations[0] += 1
- raise KeyError('foo')
-
- backend = AMQPBackend(self.app)
- from celery.app.amqp import Producer
- prod, Producer.publish = Producer.publish, publish
- try:
- with pytest.raises(KeyError):
- backend.retry_policy['max_retries'] = None
- backend.store_result('foo', 'bar', 'STARTED')
-
- with pytest.raises(KeyError):
- backend.retry_policy['max_retries'] = 10
- backend.store_result('foo', 'bar', 'STARTED')
- finally:
- Producer.publish = prod
-
- def test_poll_no_messages(self):
- b = self.create_backend()
- assert b.get_task_meta(uuid())['status'] == states.PENDING
-
- @contextmanager
- def _result_context(self):
- results = Queue()
-
- class Message:
- acked = 0
- requeued = 0
-
- def __init__(self, **merge):
- self.payload = dict({'status': states.STARTED,
- 'result': None}, **merge)
- self.properties = {'correlation_id': merge.get('task_id')}
- self.body = pickle.dumps(self.payload)
- self.content_type = 'application/x-python-serialize'
- self.content_encoding = 'binary'
-
- def ack(self, *args, **kwargs):
- self.acked += 1
-
- def requeue(self, *args, **kwargs):
- self.requeued += 1
-
- class MockBinding:
-
- def __init__(self, *args, **kwargs):
- self.channel = Mock()
-
- def __call__(self, *args, **kwargs):
- return self
-
- def declare(self):
- pass
-
- def get(self, no_ack=False, accept=None):
- try:
- m = results.get(block=False)
- if m:
- m.accept = accept
- return m
- except Empty:
- pass
-
- def is_bound(self):
- return True
-
- class MockBackend(AMQPBackend):
- Queue = MockBinding
-
- backend = MockBackend(self.app, max_cached_results=100)
- backend._republish = Mock()
-
- yield results, backend, Message
-
- def test_backlog_limit_exceeded(self):
- with self._result_context() as (results, backend, Message):
- for i in range(1001):
- results.put(Message(task_id='id', status=states.RECEIVED))
- with pytest.raises(backend.BacklogLimitExceeded):
- backend.get_task_meta('id')
-
- def test_poll_result(self):
- with self._result_context() as (results, backend, Message):
- tid = uuid()
- # FFWD's to the latest state.
- state_messages = [
- Message(task_id=tid, status=states.RECEIVED, seq=1),
- Message(task_id=tid, status=states.STARTED, seq=2),
- Message(task_id=tid, status=states.FAILURE, seq=3),
- ]
- for state_message in state_messages:
- results.put(state_message)
- r1 = backend.get_task_meta(tid)
- # FFWDs to the last state.
- assert r1['status'] == states.FAILURE
- assert r1['seq'] == 3
-
- # Caches last known state.
- tid = uuid()
- results.put(Message(task_id=tid))
- backend.get_task_meta(tid)
- assert tid, backend._cache in 'Caches last known state'
-
- assert state_messages[-1].requeued
-
- # Returns cache if no new states.
- results.queue.clear()
- assert not results.qsize()
- backend._cache[tid] = 'hello'
- # returns cache if no new states.
- assert backend.get_task_meta(tid) == 'hello'
-
- def test_drain_events_decodes_exceptions_in_meta(self):
- tid = uuid()
- b = self.create_backend(serializer='json')
- b.store_result(tid, RuntimeError('aap'), states.FAILURE)
- result = AsyncResult(tid, backend=b)
-
- with pytest.raises(Exception) as excinfo:
- result.get()
-
- assert excinfo.value.__class__.__name__ == 'RuntimeError'
- assert str(excinfo.value) == 'aap'
-
- def test_no_expires(self):
- b = self.create_backend(expires=None)
- app = self.app
- app.conf.result_expires = None
- b = self.create_backend(expires=None)
- q = b._create_binding('foo')
- assert q.expires is None
-
- def test_process_cleanup(self):
- self.create_backend().process_cleanup()
-
- def test_reload_task_result(self):
- with pytest.raises(NotImplementedError):
- self.create_backend().reload_task_result('x')
-
- def test_reload_group_result(self):
- with pytest.raises(NotImplementedError):
- self.create_backend().reload_group_result('x')
-
- def test_save_group(self):
- with pytest.raises(NotImplementedError):
- self.create_backend().save_group('x', 'x')
-
- def test_restore_group(self):
- with pytest.raises(NotImplementedError):
- self.create_backend().restore_group('x')
-
- def test_delete_group(self):
- with pytest.raises(NotImplementedError):
- self.create_backend().delete_group('x')
-
-
-class test_AMQPBackend_result_extended:
- def setup(self):
- self.app.conf.result_extended = True
-
- def test_store_result(self):
- b = AMQPBackend(self.app)
- tid = uuid()
-
- request = Context(args=(1, 2, 3), kwargs={'foo': 'bar'},
- task_name='mytask', retries=2,
- hostname='celery@worker_1',
- delivery_info={'routing_key': 'celery'})
-
- b.store_result(tid, {'fizz': 'buzz'}, states.SUCCESS, request=request)
-
- meta = b.get_task_meta(tid)
- assert meta == {
- 'args': [1, 2, 3],
- 'children': [],
- 'kwargs': {'foo': 'bar'},
- 'name': 'mytask',
- 'queue': 'celery',
- 'result': {'fizz': 'buzz'},
- 'retries': 2,
- 'status': 'SUCCESS',
- 'task_id': tid,
- 'traceback': None,
- 'worker': 'celery@worker_1',
- }
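Since the backend's own deprecation message already points at the RPC backend, migrating away from the removed backend is a one-line configuration change. A minimal sketch, assuming an app configured in Python:

    from celery import Celery

    app = Celery('proj', broker='amqp://guest@localhost//')
    # Replace the removed 'amqp://' result backend with the RPC backend
    # (or any persistent backend such as Redis or a database):
    app.conf.result_backend = 'rpc://'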
From b7ddd8a2659436cba61596beb308cdb79bd7a563 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 23 Sep 2020 19:07:59 +0300
Subject: [PATCH 032/157] Warn when deprecated settings are used (#6353)
* Warn when deprecated settings are used.
* Mention deprecation in docs.
* Refer to the right place in the documentation.
---
celery/app/log.py | 5 +++++
celery/app/utils.py | 20 ++++++++++++++++++++
celery/apps/worker.py | 8 ++++++++
docs/userguide/configuration.rst | 12 +++++++-----
docs/whatsnew-5.0.rst | 2 +-
5 files changed, 41 insertions(+), 6 deletions(-)
diff --git a/celery/app/log.py b/celery/app/log.py
index e96f1174221..d27a85ee559 100644
--- a/celery/app/log.py
+++ b/celery/app/log.py
@@ -9,12 +9,14 @@
import logging
import os
import sys
+import warnings
from logging.handlers import WatchedFileHandler
from kombu.utils.encoding import set_default_encoding_file
from celery import signals
from celery._state import get_current_task
+from celery.exceptions import CDeprecationWarning, CPendingDeprecationWarning
from celery.local import class_property
from celery.platforms import isatty
from celery.utils.log import (ColorFormatter, LoggingProxy, get_logger,
@@ -70,6 +72,9 @@ def setup(self, loglevel=None, logfile=None, redirect_stdouts=False,
CELERY_LOG_LEVEL=str(loglevel) if loglevel else '',
CELERY_LOG_FILE=str(logfile) if logfile else '',
)
+ warnings.filterwarnings('always', category=CDeprecationWarning)
+ warnings.filterwarnings('always', category=CPendingDeprecationWarning)
+ logging.captureWarnings(True)
return handled
def redirect_stdouts(self, loglevel=None, name='celery.redirected'):
diff --git a/celery/app/utils.py b/celery/app/utils.py
index 40610433cf0..c365808a484 100644
--- a/celery/app/utils.py
+++ b/celery/app/utils.py
@@ -77,6 +77,11 @@ class Settings(ConfigurationView):
"""
+ def __init__(self, *args, deprecated_settings=None, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ self.deprecated_settings = deprecated_settings
+
@property
def broker_read_url(self):
return (
@@ -190,6 +195,20 @@ def humanize(self, with_defaults=False, censored=True):
f'{key}: {pretty(value, width=50)}'
for key, value in self.table(with_defaults, censored).items())
+ def maybe_warn_deprecated_settings(self):
+ # TODO: Remove this method in Celery 6.0
+ if self.deprecated_settings:
+ from celery.utils import deprecated
+ from celery.app.defaults import _TO_NEW_KEY
+ for setting in self.deprecated_settings:
+ deprecated.warn(description=f'The {setting!r} setting',
+ removal='6.0.0',
+ alternative=f'Use the {_TO_NEW_KEY[setting]} instead')
+
+ return True
+
+ return False
+
def _new_key_to_old(key, convert=_TO_OLD_KEY.get):
return convert(key, key)
@@ -263,6 +282,7 @@ def detect_settings(conf, preconf=None, ignore_keys=None, prefix=None,
return Settings(
preconf, [conf, defaults],
(_old_key_to_new, _new_key_to_old),
+ deprecated_settings=is_in_old,
prefix=prefix,
)
diff --git a/celery/apps/worker.py b/celery/apps/worker.py
index cfa8099f34d..6c1b5eb1c20 100644
--- a/celery/apps/worker.py
+++ b/celery/apps/worker.py
@@ -140,6 +140,14 @@ def on_start(self):
if not self._custom_logging and self.redirect_stdouts:
app.log.redirect_stdouts(self.redirect_stdouts_level)
+ # TODO: Remove the following code in Celery 6.0
+ if app.conf.maybe_warn_deprecated_settings():
+ logger.warning(
+ "Please run `celery upgrade settings path/to/settings.py` "
+ "to avoid these warnings and to allow a smoother upgrade "
+ "to Celery 6.0."
+ )
+
def emit_banner(self):
# Dump configuration to screen so we have some basic information
# for when users sends bug reports.
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index 384be135b42..67b3bf96846 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -47,12 +47,14 @@ names, are the renaming of some prefixes, like ``celery_beat_`` to ``beat_``,
``celeryd_`` to ``worker_``, and most of the top level ``celery_`` settings
have been moved into a new ``task_`` prefix.
-.. note::
+.. warning::
+
+ Celery will still be able to read old configuration files until Celery 6.0.
+ Afterwards, support for the old configuration files will be removed.
+ We provide the ``celery upgrade`` command that should handle
+ plenty of cases (including :ref:`Django `).
- Celery will still be able to read old configuration files, so
- there's no rush in moving to the new settings format. Furthermore,
- we provide the ``celery upgrade`` command that should handle plenty
- of cases (including :ref:`Django `).
+ Please migrate to the new configuration scheme as soon as possible.
========================================== ==============================================
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index b062a275060..b341bc0e08d 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -116,7 +116,7 @@ please do so now.
We elected to extend the deprecation period until 6.0 since
we did not loudly warn about using these deprecated settings.
-Please refer to the :ref:`migration guide ` for instructions.
+Please refer to the :ref:`migration guide ` for instructions.
Step 3: Read the important notes in this document
-------------------------------------------------
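To see the new warning path end to end: a worker started with an old uppercase key (for example CELERY_RESULT_BACKEND instead of result_backend) now logs one CDeprecationWarning per deprecated setting, followed by the upgrade hint added above. The suggested fix rewrites the configuration file in place:

    # Renames old keys to their new equivalents and keeps a .orig backup:
    $ celery upgrade settings path/to/settings.py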
From a5ee635031a9a208a245350c8e4a058dc7b05109 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 23 Sep 2020 19:21:10 +0300
Subject: [PATCH 033/157] Complete What's New.
---
docs/whatsnew-5.0.rst | 2 ++
1 file changed, 2 insertions(+)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index b341bc0e08d..8278fc6bb85 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -259,3 +259,5 @@ for the packager's convenience.
News
====
+
+There are no other functional changes.
From 42b0f3198dfde5c954e5b8d84bc4f3c37f03bfc5 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 12:57:14 +0300
Subject: [PATCH 034/157] Add wall of contributors.
---
docs/whatsnew-5.0.rst | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 8278fc6bb85..e375b462c84 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -85,6 +85,24 @@ Watch the next "What's New" document for updates.
Wall of Contributors
--------------------
+Artem Vasilyev
+Ash Berlin-Taylor
+Asif Saif Uddin (Auvi)
+Asif Saif Uddin
+Christian Clauss
+Germain Chazot
+Harry Moreno
+kevinbai
+Martin Paulus
+Matus Valo
+Matus Valo
+maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
+Omer Katz
+Patrick Cloke
+qiaocc
+Thomas Grainger
+Weiliang Li
+
.. note::
This wall was automatically generated from git history,
From 1b463c2b050e1ae095acd980bec243d8d39cb6ec Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 13:10:28 +0300
Subject: [PATCH 035/157] Update codename.
---
README.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.rst b/README.rst
index 2193389d89c..8ab4c23c5b4 100644
--- a/README.rst
+++ b/README.rst
@@ -2,7 +2,7 @@
|build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge|
-:Version: 5.0.0rc3 (cliffs)
+:Version: 5.0.0rc3 (singularity)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
From fba756f35b2ab719895bf30c52cd7233d635af86 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 13:14:07 +0300
Subject: [PATCH 036/157] Fix alt text.
---
README.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.rst b/README.rst
index 8ab4c23c5b4..ed2392cab52 100644
--- a/README.rst
+++ b/README.rst
@@ -518,7 +518,7 @@ file in the top distribution directory for the full license text.
:target: https://pypi.org/project/celery/
.. |pyimp| image:: https://img.shields.io/pypi/implementation/celery.svg
- :alt: Support Python implementations.
+ :alt: Supported Python implementations.
:target: https://pypi.org/project/celery/
.. |ocbackerbadge| image:: https://opencollective.com/celery/backers/badge.svg
From d1b5965c92b0470020495de5cecf7c28126f6396 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 13:34:41 +0300
Subject: [PATCH 037/157] isort.
---
celery/app/base.py | 3 ++-
celery/app/utils.py | 2 +-
celery/backends/couchbase.py | 3 ++-
celery/bin/base.py | 2 +-
celery/utils/serialization.py | 4 +---
t/skip.py | 1 +
t/unit/app/test_utils.py | 1 -
t/unit/apps/test_multi.py | 2 +-
t/unit/backends/test_database.py | 7 +++----
t/unit/backends/test_filesystem.py | 2 +-
t/unit/backends/test_redis.py | 2 +-
t/unit/concurrency/test_concurrency.py | 3 +--
t/unit/concurrency/test_eventlet.py | 3 +--
t/unit/concurrency/test_prefork.py | 3 +--
t/unit/contrib/test_migrate.py | 2 +-
t/unit/contrib/test_rdb.py | 3 +--
t/unit/utils/test_collections.py | 3 +--
t/unit/utils/test_platforms.py | 3 +--
t/unit/utils/test_sysinfo.py | 2 +-
t/unit/utils/test_term.py | 3 +--
t/unit/worker/test_components.py | 4 +---
t/unit/worker/test_worker.py | 3 +--
22 files changed, 25 insertions(+), 36 deletions(-)
diff --git a/celery/app/base.py b/celery/app/base.py
index c4657ce39f6..dc7c41d804f 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -31,9 +31,10 @@
from celery.utils.log import get_logger
from celery.utils.objects import FallbackContext, mro_lookup
from celery.utils.time import timezone, to_utc
-from . import backends
+
# Load all builtin tasks
from . import builtins # noqa
+from . import backends
from .annotations import prepare as prepare_annotations
from .autoretry import add_autoretry_behaviour
from .defaults import DEFAULT_SECURITY_DIGEST, find_deprecated_settings
diff --git a/celery/app/utils.py b/celery/app/utils.py
index c365808a484..05aeb1e5016 100644
--- a/celery/app/utils.py
+++ b/celery/app/utils.py
@@ -198,8 +198,8 @@ def humanize(self, with_defaults=False, censored=True):
def maybe_warn_deprecated_settings(self):
# TODO: Remove this method in Celery 6.0
if self.deprecated_settings:
- from celery.utils import deprecated
from celery.app.defaults import _TO_NEW_KEY
+ from celery.utils import deprecated
for setting in self.deprecated_settings:
deprecated.warn(description=f'The {setting!r} setting',
removal='6.0.0',
diff --git a/celery/backends/couchbase.py b/celery/backends/couchbase.py
index 4c15a37ab15..9ed594c4826 100644
--- a/celery/backends/couchbase.py
+++ b/celery/backends/couchbase.py
@@ -3,11 +3,12 @@
from kombu.utils.url import _parse_url
from celery.exceptions import ImproperlyConfigured
+
from .base import KeyValueStoreBackend
try:
- from couchbase.cluster import Cluster, ClusterOptions
from couchbase.auth import PasswordAuthenticator
+ from couchbase.cluster import Cluster, ClusterOptions
from couchbase_core._libcouchbase import FMT_AUTO
except ImportError:
Cluster = PasswordAuthenticator = ClusterOptions = None
diff --git a/celery/bin/base.py b/celery/bin/base.py
index b11ebecade8..5b74d5de046 100644
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -14,8 +14,8 @@
try:
from pygments import highlight
- from pygments.lexers import PythonLexer
from pygments.formatters import Terminal256Formatter
+ from pygments.lexers import PythonLexer
except ImportError:
def highlight(s, *args, **kwargs):
"""Place holder function in case pygments is missing."""
diff --git a/celery/utils/serialization.py b/celery/utils/serialization.py
index 4d1de4712ab..af7804a2132 100644
--- a/celery/utils/serialization.py
+++ b/celery/utils/serialization.py
@@ -8,9 +8,7 @@
from inspect import getmro
from itertools import takewhile
-from kombu.utils.encoding import bytes_to_str, str_to_bytes
-
-from kombu.utils.encoding import safe_repr
+from kombu.utils.encoding import bytes_to_str, safe_repr, str_to_bytes
try:
import cPickle as pickle
diff --git a/t/skip.py b/t/skip.py
index 6e3d86ec2ee..c1c5a802a09 100644
--- a/t/skip.py
+++ b/t/skip.py
@@ -1,4 +1,5 @@
import sys
+
import pytest
if_pypy = pytest.mark.skipif(getattr(sys, 'pypy_version_info', None), reason='PyPy not supported.')
diff --git a/t/unit/app/test_utils.py b/t/unit/app/test_utils.py
index 2a9827544a6..7eb8bec0f93 100644
--- a/t/unit/app/test_utils.py
+++ b/t/unit/app/test_utils.py
@@ -1,5 +1,4 @@
from collections.abc import Mapping, MutableMapping
-
from unittest.mock import Mock
from celery.app.utils import Settings, bugreport, filter_hidden_settings
diff --git a/t/unit/apps/test_multi.py b/t/unit/apps/test_multi.py
index 9d224baa2c6..f603c0f406d 100644
--- a/t/unit/apps/test_multi.py
+++ b/t/unit/apps/test_multi.py
@@ -6,9 +6,9 @@
import pytest
+import t.skip
from celery.apps.multi import (Cluster, MultiParser, NamespacedOptionParser,
Node, format_opt)
-import t.skip
class test_functions:
diff --git a/t/unit/backends/test_database.py b/t/unit/backends/test_database.py
index 15a338b29b1..bff42361841 100644
--- a/t/unit/backends/test_database.py
+++ b/t/unit/backends/test_database.py
@@ -10,12 +10,11 @@
pytest.importorskip('sqlalchemy')
-from celery.backends.database import (DatabaseBackend, retry, session, # noqa
+from celery.backends.database import (DatabaseBackend, retry, session, # noqa
session_cleanup)
-from celery.backends.database.models import Task, TaskSet # noqa
+from celery.backends.database.models import Task, TaskSet # noqa
from celery.backends.database.session import SessionManager # noqa
-
-from t import skip # noqa
+from t import skip # noqa
class SomeClass:
diff --git a/t/unit/backends/test_filesystem.py b/t/unit/backends/test_filesystem.py
index 97d4f7e670f..98a37b2e070 100644
--- a/t/unit/backends/test_filesystem.py
+++ b/t/unit/backends/test_filesystem.py
@@ -3,8 +3,8 @@
import tempfile
import pytest
-import t.skip
+import t.skip
from celery import states, uuid
from celery.backends import filesystem
from celery.backends.filesystem import FilesystemBackend
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
index 7c3e3e7d908..3f6257c8ae7 100644
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -11,7 +11,7 @@
from celery import signature, states, uuid
from celery.canvas import Signature
-from celery.exceptions import (ChordError, ImproperlyConfigured)
+from celery.exceptions import ChordError, ImproperlyConfigured
from celery.utils.collections import AttributeDict
diff --git a/t/unit/concurrency/test_concurrency.py b/t/unit/concurrency/test_concurrency.py
index 077369c22a4..a48ef83ce49 100644
--- a/t/unit/concurrency/test_concurrency.py
+++ b/t/unit/concurrency/test_concurrency.py
@@ -1,7 +1,6 @@
-from unittest.mock import Mock, patch
-
import os
from itertools import count
+from unittest.mock import Mock, patch
import pytest
diff --git a/t/unit/concurrency/test_eventlet.py b/t/unit/concurrency/test_eventlet.py
index 7d9dedee010..dcd803e5342 100644
--- a/t/unit/concurrency/test_eventlet.py
+++ b/t/unit/concurrency/test_eventlet.py
@@ -3,9 +3,8 @@
import pytest
-from celery.concurrency.eventlet import TaskPool, Timer, apply_target
-
import t.skip
+from celery.concurrency.eventlet import TaskPool, Timer, apply_target
eventlet_modules = (
'eventlet',
diff --git a/t/unit/concurrency/test_prefork.py b/t/unit/concurrency/test_prefork.py
index af12643f68c..275d4f2f521 100644
--- a/t/unit/concurrency/test_prefork.py
+++ b/t/unit/concurrency/test_prefork.py
@@ -7,14 +7,13 @@
import pytest
from case import mock
+import t.skip
from celery.app.defaults import DEFAULTS
from celery.concurrency.asynpool import iterate_file_descriptors_safely
from celery.utils.collections import AttributeDict
from celery.utils.functional import noop
from celery.utils.objects import Bunch
-import t.skip
-
try:
from celery.concurrency import asynpool
from celery.concurrency import prefork as mp
diff --git a/t/unit/contrib/test_migrate.py b/t/unit/contrib/test_migrate.py
index 466a89d443f..6754e536a6c 100644
--- a/t/unit/contrib/test_migrate.py
+++ b/t/unit/contrib/test_migrate.py
@@ -6,6 +6,7 @@
from case import mock
from kombu import Connection, Exchange, Producer, Queue
from kombu.transport.virtual import QoS
+from kombu.utils.encoding import ensure_bytes
from celery.contrib.migrate import (State, StopFiltering, _maybe_queue,
expand_dest, filter_callback,
@@ -13,7 +14,6 @@
migrate_tasks, move, move_by_idmap,
move_by_taskmap, move_task_by_id,
start_filter, task_id_eq, task_id_in)
-from kombu.utils.encoding import ensure_bytes
# hack to ignore error at shutdown
QoS.restore_at_shutdown = False
diff --git a/t/unit/contrib/test_rdb.py b/t/unit/contrib/test_rdb.py
index 04121dd07a6..d89625719c6 100644
--- a/t/unit/contrib/test_rdb.py
+++ b/t/unit/contrib/test_rdb.py
@@ -4,11 +4,10 @@
import pytest
+import t.skip
from celery.contrib.rdb import Rdb, debugger, set_trace
from celery.utils.text import WhateverIO
-import t.skip
-
class SockErr(socket.error):
errno = None
diff --git a/t/unit/utils/test_collections.py b/t/unit/utils/test_collections.py
index 3ece457fb96..1830c7ce7cd 100644
--- a/t/unit/utils/test_collections.py
+++ b/t/unit/utils/test_collections.py
@@ -6,13 +6,12 @@
import pytest
from billiard.einfo import ExceptionInfo
+import t.skip
from celery.utils.collections import (AttributeDict, BufferMap,
ConfigurationView, DictAttribute,
LimitedSet, Messagebuffer)
from celery.utils.objects import Bunch
-import t.skip
-
class test_DictAttribute:
diff --git a/t/unit/utils/test_platforms.py b/t/unit/utils/test_platforms.py
index fc6f16b8c0b..c58a3ed6d68 100644
--- a/t/unit/utils/test_platforms.py
+++ b/t/unit/utils/test_platforms.py
@@ -8,6 +8,7 @@
import pytest
from case import mock
+import t.skip
from celery import _find_option_with_arg, platforms
from celery.exceptions import SecurityError
from celery.platforms import (DaemonContext, LockFailed, Pidfile,
@@ -20,8 +21,6 @@
signals)
from celery.utils.text import WhateverIO
-import t.skip
-
try:
import resource
except ImportError: # pragma: no cover
diff --git a/t/unit/utils/test_sysinfo.py b/t/unit/utils/test_sysinfo.py
index 4dcd5d6e65d..f892788a446 100644
--- a/t/unit/utils/test_sysinfo.py
+++ b/t/unit/utils/test_sysinfo.py
@@ -1,5 +1,5 @@
-import os
import importlib
+import os
import pytest
diff --git a/t/unit/utils/test_term.py b/t/unit/utils/test_term.py
index f423bf6a230..1a599b57d8c 100644
--- a/t/unit/utils/test_term.py
+++ b/t/unit/utils/test_term.py
@@ -1,10 +1,9 @@
import pytest
+import t.skip
from celery.utils import term
from celery.utils.term import colored, fg
-import t.skip
-
@t.skip.if_win32
class test_colored:
diff --git a/t/unit/worker/test_components.py b/t/unit/worker/test_components.py
index db904a464c9..14869cf6df7 100644
--- a/t/unit/worker/test_components.py
+++ b/t/unit/worker/test_components.py
@@ -2,12 +2,10 @@
import pytest
+import t.skip
from celery.exceptions import ImproperlyConfigured
from celery.worker.components import Beat, Hub, Pool, Timer
-
-import t.skip
-
# some of these are tested in test_worker, so I've only written tests
# here to complete coverage. Should move everything to this module at some
# point [-ask]
diff --git a/t/unit/worker/test_worker.py b/t/unit/worker/test_worker.py
index 9bc396e4f51..aedf852788f 100644
--- a/t/unit/worker/test_worker.py
+++ b/t/unit/worker/test_worker.py
@@ -19,6 +19,7 @@
from kombu.transport.memory import Transport
from kombu.utils.uuid import uuid
+import t.skip
from celery.bootsteps import CLOSE, RUN, TERMINATE, StartStopStep
from celery.concurrency.base import BasePool
from celery.exceptions import (ImproperlyConfigured, InvalidTaskError,
@@ -34,8 +35,6 @@
from celery.worker.pidbox import gPidbox
from celery.worker.request import Request
-import t.skip
-
def MockStep(step=None):
if step is None:
From 1b01683932fd7fd93f2f3cee4d83344d2ce6aeb1 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 16:35:30 +0300
Subject: [PATCH 038/157] PyPy 3.7 is currently in alpha.
No need for that sentence.
---
docs/whatsnew-5.0.rst | 2 --
1 file changed, 2 deletions(-)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index e375b462c84..176768898a7 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -61,8 +61,6 @@ for backwards compatibility.
From now on we only support Python 3.6 and above.
We will maintain compatibility with Python 3.6 until it's
EOL in December, 2021.
-We may choose to extend our support if a PyPy version for 3.7 will not become
-available by then but we don't guarantee we will.
*— Omer Katz*
From 518bf9f0f4a5ddcb220f7b1ef8b30ecc6d42e148 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 17:00:19 +0300
Subject: [PATCH 039/157] Mention the new pytest-celery plugin.
---
docs/userguide/testing.rst | 2 ++
docs/whatsnew-5.0.rst | 7 +++++++
2 files changed, 9 insertions(+)
diff --git a/docs/userguide/testing.rst b/docs/userguide/testing.rst
index cc92ae53fb3..4deccd0f15c 100644
--- a/docs/userguide/testing.rst
+++ b/docs/userguide/testing.rst
@@ -88,6 +88,8 @@ in this example:
with raises(Retry):
send_order(product.pk, 3, Decimal(30.6))
+.. _pytest_plugin:
+
pytest
======
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 176768898a7..df871abe6f5 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -271,6 +271,13 @@ completion support for the zsh and fish shells.
The bash completion script was exported to `extras/celery.bash `_
for the packager's convenience.
+Pytest Integration
+------------------
+
+Starting from Celery 5.0, the pytest plugin is no longer enabled by default.
+
+Please refer to the :ref:`documentation <pytest_plugin>` for instructions.
+
.. _v500-news:
News
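A sketch of the standard pytest mechanisms for enabling a plugin that is no longer auto-loaded; `celery.contrib.pytest` is the plugin module shipped with Celery:

    $ pytest -p celery.contrib.pytest t/unit

Project-wide, the equivalent is adding `pytest_plugins = ("celery.contrib.pytest",)` to the top-level conftest.py.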
From be8547e30090d142aa4c9cb4ef7169eb077cdb8e Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 17:08:42 +0300
Subject: [PATCH 040/157] Mention retry policy for the redis result backend.
---
docs/getting-started/brokers/redis.rst | 4 +++-
docs/whatsnew-5.0.rst | 8 +++++++-
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/docs/getting-started/brokers/redis.rst b/docs/getting-started/brokers/redis.rst
index 52a9b6944b3..54d533b91a6 100644
--- a/docs/getting-started/brokers/redis.rst
+++ b/docs/getting-started/brokers/redis.rst
@@ -94,6 +94,8 @@ If you are using Sentinel, you should specify the master_name using the :setting
app.conf.result_backend_transport_options = {'master_name': "mymaster"}
+.. _redis-result-backend-timeout:
+
Connection timeouts
^^^^^^^^^^^^^^^^^^^
@@ -164,7 +166,7 @@ by setting in the redis configuration file:
- the ``maxmemory-policy`` option to ``noeviction`` or ``allkeys-lru``
See Redis server documentation about Eviction Policies for details:
-
+
https://redis.io/topics/lru-cache
Group result ordering
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index df871abe6f5..490e2f1c162 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -283,4 +283,10 @@ Please refer to the :ref:`documentation ` for instructions.
News
====
-There are no other functional changes.
+Retry Policy for the Redis Result Backend
+-----------------------------------------
+
+The retry policy for the Redis result backend is now exposed through
+the result backend transport options.
+
+Please refer to the :ref:`documentation <redis-result-backend-timeout>` for details.
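A minimal sketch of such a configuration; the 'retry_policy' key and its fields are assumptions based on the documentation section referenced above, not spelled out by this patch:

    app.conf.result_backend_transport_options = {
        'retry_policy': {      # assumed key; check the referenced docs
            'timeout': 5.0,    # stop retrying the operation after 5 seconds
        },
    }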
From c0b158d0bd888dc4b4375576dddfe60e41480e5f Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 17:08:51 +0300
Subject: [PATCH 041/157] Fix phrasing.
---
docs/whatsnew-5.0.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 490e2f1c162..745aca84f6b 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -261,7 +261,7 @@ As a result a few breaking changes has been introduced:
- Postfix global options like `celery worker --app path.to.app` or `celery worker --workdir /path/to/workdir` are no longer supported.
You should specify them as part of the global options of the main celery command.
- :program:`celery amqp` and :program:`celery shell` require the `repl`
- sub command to start a shell. You can now invoke specific shell commands
+ sub command to start a shell. You can now also invoke specific commands
without a shell. Type `celery amqp --help` or `celery shell --help` for details.
Click provides shell completion `out of the box `_.
From 1c6be61cd5877beed8198609b79ba85bbbbe4970 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 17:13:46 +0300
Subject: [PATCH 042/157] Mention ordered group results are now the default.
---
docs/getting-started/brokers/redis.rst | 2 ++
docs/whatsnew-5.0.rst | 14 ++++++++++++++
2 files changed, 16 insertions(+)
diff --git a/docs/getting-started/brokers/redis.rst b/docs/getting-started/brokers/redis.rst
index 54d533b91a6..ba4b31aa9bd 100644
--- a/docs/getting-started/brokers/redis.rst
+++ b/docs/getting-started/brokers/redis.rst
@@ -169,6 +169,8 @@ See Redis server documentation about Eviction Policies for details:
https://redis.io/topics/lru-cache
+.. _redis-group-result-ordering:
+
Group result ordering
---------------------
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 745aca84f6b..af8dd18fa5d 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -278,6 +278,20 @@ Starting from Celery 5.0, the pytest plugin is no longer enabled by default.
Please refer to the :ref:`documentation ` for instructions.
+Ordered Group Results for the Redis Result Backend
+-------------------------------------------------
+
+Previously, group results were not returned in their invocation order.
+Celery 4.4.7 introduced an opt-in feature to make them ordered.
+
+It is now an opt-out behavior.
+
+If you were previously using the Redis result backend, you might need to
+opt-out of this behavior.
+
+Please refer to the :ref:`documentation <redis-group-result-ordering>`
+for instructions on how to disable this feature.
+
.. _v500-news:
News
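For anyone who needs the old unordered behavior back, the opt-out is a result backend transport option. A sketch; the key name 'result_chord_ordered' is an assumption carried over from the 4.4.7 opt-in and should be verified against the referenced docs:

    app.conf.result_backend_transport_options = {
        'result_chord_ordered': False,  # assumed key; ordered is now the default
    }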
From 67052f3c61f708d833d0cb465f5a48f8a36f91d7 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 17:19:58 +0300
Subject: [PATCH 043/157] pyupgrade.
---
celery/app/autoretry.py | 1 -
celery/backends/database/session.py | 4 ++--
celery/backends/filesystem.py | 9 +--------
celery/bin/amqp.py | 2 +-
celery/bin/base.py | 4 ++--
celery/bin/control.py | 6 +++---
celery/bin/events.py | 4 ++--
celery/bin/graph.py | 12 ++++++------
celery/bin/list.py | 2 +-
celery/bin/logtool.py | 4 ++--
celery/bin/multi.py | 20 ++++++++++----------
celery/bin/upgrade.py | 4 ++--
celery/contrib/testing/mocks.py | 5 +----
celery/security/__init__.py | 4 ++--
t/benchmarks/bench_worker.py | 2 +-
t/unit/apps/test_multi.py | 6 +++---
t/unit/backends/test_asynchronous.py | 2 +-
t/unit/backends/test_cassandra.py | 4 ++--
t/unit/backends/test_mongodb.py | 4 ++--
19 files changed, 44 insertions(+), 55 deletions(-)
diff --git a/celery/app/autoretry.py b/celery/app/autoretry.py
index 678f3970897..21c90e026a2 100644
--- a/celery/app/autoretry.py
+++ b/celery/app/autoretry.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""Tasks auto-retry functionality."""
from vine.utils import wraps
diff --git a/celery/backends/database/session.py b/celery/backends/database/session.py
index e03271f2c1d..047a9271d92 100644
--- a/celery/backends/database/session.py
+++ b/celery/backends/database/session.py
@@ -36,8 +36,8 @@ def get_engine(self, dburi, **kwargs):
engine = self._engines[dburi] = create_engine(dburi, **kwargs)
return engine
else:
- kwargs = dict([(k, v) for k, v in kwargs.items() if
- not k.startswith('pool')])
+ kwargs = {k: v for k, v in kwargs.items() if
+ not k.startswith('pool')}
return create_engine(dburi, poolclass=NullPool, **kwargs)
def create_session(self, dburi, short_lived_sessions=False, **kwargs):
diff --git a/celery/backends/filesystem.py b/celery/backends/filesystem.py
index ade24425dc4..6b937b693b5 100644
--- a/celery/backends/filesystem.py
+++ b/celery/backends/filesystem.py
@@ -8,13 +8,6 @@
from celery.backends.base import KeyValueStoreBackend
from celery.exceptions import ImproperlyConfigured
-# Python 2 does not have FileNotFoundError and IsADirectoryError
-try:
- FileNotFoundError
-except NameError:
- FileNotFoundError = IOError
- IsADirectoryError = IOError
-
default_encoding = locale.getpreferredencoding(False)
E_NO_PATH_SET = 'You need to configure a path for the file-system backend'
@@ -58,7 +51,7 @@ def __init__(self, url=None, open=open, unlink=os.unlink, sep=os.sep,
def __reduce__(self, args=(), kwargs={}):
kwargs.update(
dict(url=self.url))
- return super(FilesystemBackend, self).__reduce__(args, kwargs)
+ return super().__reduce__(args, kwargs)
def _find_path(self, url):
if not url:
diff --git a/celery/bin/amqp.py b/celery/bin/amqp.py
index 8b3dea87c71..e8b7f24066c 100644
--- a/celery/bin/amqp.py
+++ b/celery/bin/amqp.py
@@ -171,7 +171,7 @@ def queue_declare(amqp_context, queue, passive, durable, auto_delete):
amqp_context.reconnect()
else:
amqp_context.cli_context.secho(
- 'queue:{0} messages:{1} consumers:{2}'.format(*retval),
+ 'queue:{} messages:{} consumers:{}'.format(*retval),
fg='cyan', bold=True)
amqp_context.echo_ok()
diff --git a/celery/bin/base.py b/celery/bin/base.py
index 5b74d5de046..9429900a957 100644
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -120,13 +120,13 @@ class CeleryOption(click.Option):
def get_default(self, ctx):
if self.default_value_from_context:
self.default = ctx.obj[self.default_value_from_context]
- return super(CeleryOption, self).get_default(ctx)
+ return super().get_default(ctx)
def __init__(self, *args, **kwargs):
"""Initialize a Celery option."""
self.help_group = kwargs.pop('help_group', None)
self.default_value_from_context = kwargs.pop('default_value_from_context', None)
- super(CeleryOption, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
class CeleryCommand(click.Command):
diff --git a/celery/bin/control.py b/celery/bin/control.py
index fd6e8cbde2b..a48de89ce72 100644
--- a/celery/bin/control.py
+++ b/celery/bin/control.py
@@ -30,7 +30,7 @@ def _consume_arguments(meta, method, args):
if meta.variadic:
break
raise click.UsageError(
- 'Command {0!r} takes arguments: {1}'.format(
+ 'Command {!r} takes arguments: {}'.format(
method, meta.signature))
else:
yield name, typ(arg) if typ is not None else arg
@@ -86,7 +86,7 @@ def status(ctx, timeout, destination, json, **kwargs):
ctx.obj.echo(dumps(replies))
nodecount = len(replies)
if not kwargs.get('quiet', False):
- ctx.obj.echo('\n{0} {1} online.'.format(
+ ctx.obj.echo('\n{} {} online.'.format(
nodecount, text.pluralize(nodecount, 'node')))
@@ -134,7 +134,7 @@ def inspect(ctx, action, timeout, destination, json, **kwargs):
ctx.obj.echo(dumps(replies))
nodecount = len(replies)
if not ctx.obj.quiet:
- ctx.obj.echo('\n{0} {1} online.'.format(
+ ctx.obj.echo('\n{} {} online.'.format(
nodecount, text.pluralize(nodecount, 'node')))
diff --git a/celery/bin/events.py b/celery/bin/events.py
index a9978a1a0fe..0e3bd1a8aea 100644
--- a/celery/bin/events.py
+++ b/celery/bin/events.py
@@ -9,8 +9,8 @@
def _set_process_status(prog, info=''):
- prog = '{0}:{1}'.format('celery events', prog)
- info = '{0} {1}'.format(info, strargv(sys.argv))
+ prog = '{}:{}'.format('celery events', prog)
+ info = '{} {}'.format(info, strargv(sys.argv))
return set_process_title(prog, info=info)
diff --git a/celery/bin/graph.py b/celery/bin/graph.py
index 1cdbc25f5e4..4a1b005120e 100644
--- a/celery/bin/graph.py
+++ b/celery/bin/graph.py
@@ -42,10 +42,10 @@ def maybe_list(l, sep=','):
generic = 'generic' in args
def generic_label(node):
- return '{0} ({1}://)'.format(type(node).__name__,
+ return '{} ({}://)'.format(type(node).__name__,
node._label.split('://')[0])
- class Node(object):
+ class Node:
force_label = None
scheme = {}
@@ -71,8 +71,8 @@ class Thread(Node):
def __init__(self, label, **kwargs):
self.real_label = label
- super(Thread, self).__init__(
- label='thr-{0}'.format(next(tids)),
+ super().__init__(
+ label='thr-{}'.format(next(tids)),
pos=0,
)
@@ -139,11 +139,11 @@ def maybe_abbr(l, name, max=Wmax):
size = len(l)
abbr = max and size > max
if 'enumerate' in args:
- l = ['{0}{1}'.format(name, subscript(i + 1))
+ l = ['{}{}'.format(name, subscript(i + 1))
for i, obj in enumerate(l)]
if abbr:
l = l[0:max - 1] + [l[size - 1]]
- l[max - 2] = '{0}⎨…{1}⎬'.format(
+ l[max - 2] = '{}⎨…{}⎬'.format(
name[0], subscript(size - (max - 1)))
return l
diff --git a/celery/bin/list.py b/celery/bin/list.py
index 47d71045fd0..fefc5e73fde 100644
--- a/celery/bin/list.py
+++ b/celery/bin/list.py
@@ -29,7 +29,7 @@ def bindings(ctx):
raise click.UsageError('Your transport cannot list bindings.')
def fmt(q, e, r):
- ctx.obj.echo('{0:<28} {1:<28} {2}'.format(q, e, r))
+ ctx.obj.echo(f'{q:<28} {e:<28} {r}')
fmt('Queue', 'Exchange', 'Routing Key')
fmt('-' * 16, '-' * 16, '-' * 16)
for b in bindings:
diff --git a/celery/bin/logtool.py b/celery/bin/logtool.py
index 6430aad964e..07dbffa8767 100644
--- a/celery/bin/logtool.py
+++ b/celery/bin/logtool.py
@@ -32,7 +32,7 @@ class _task_counts(list):
@property
def format(self):
- return '\n'.join('{0}: {1}'.format(*i) for i in self)
+ return '\n'.join('{}: {}'.format(*i) for i in self)
def task_info(line):
@@ -40,7 +40,7 @@ def task_info(line):
return m.groups()
-class Audit(object):
+class Audit:
def __init__(self, on_task_error=None, on_trace=None, on_debug=None):
self.ids = set()
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index d25325df1ba..3e999ab2ab5 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -167,7 +167,7 @@ def _inner(self, *argv, **kwargs):
return _inner
-class TermLogger(object):
+class TermLogger:
splash_text = 'celery multi v{version}'
splash_context = {'version': VERSION_BANNER}
@@ -277,7 +277,7 @@ def call_command(self, command, argv):
try:
return self.commands[command](*argv) or EX_OK
except KeyError:
- return self.error('Invalid command: {0}'.format(command))
+ return self.error(f'Invalid command: {command}')
def _handle_reserved_options(self, argv):
argv = list(argv) # don't modify callers argv.
@@ -402,7 +402,7 @@ def on_still_waiting_for(self, nodes):
num_left = len(nodes)
if num_left:
self.note(self.colored.blue(
- '> Waiting for {0} {1} -> {2}...'.format(
+ '> Waiting for {} {} -> {}...'.format(
num_left, pluralize(num_left, 'node'),
', '.join(str(node.pid) for node in nodes)),
), newline=False)
@@ -419,17 +419,17 @@ def on_node_signal_dead(self, node):
node))
def on_node_start(self, node):
- self.note('\t> {0.name}: '.format(node), newline=False)
+ self.note(f'\t> {node.name}: ', newline=False)
def on_node_restart(self, node):
self.note(self.colored.blue(
- '> Restarting node {0.name}: '.format(node)), newline=False)
+ f'> Restarting node {node.name}: '), newline=False)
def on_node_down(self, node):
- self.note('> {0.name}: {1.DOWN}'.format(node, self))
+ self.note(f'> {node.name}: {self.DOWN}')
def on_node_shutdown_ok(self, node):
- self.note('\n\t> {0.name}: {1.OK}'.format(node, self))
+ self.note(f'\n\t> {node.name}: {self.OK}')
def on_node_status(self, node, retval):
self.note(retval and self.FAILED or self.OK)
@@ -439,13 +439,13 @@ def on_node_signal(self, node, sig):
node, sig=sig))
def on_child_spawn(self, node, argstr, env):
- self.info(' {0}'.format(argstr))
+ self.info(f' {argstr}')
def on_child_signalled(self, node, signum):
- self.note('* Child was terminated by signal {0}'.format(signum))
+ self.note(f'* Child was terminated by signal {signum}')
def on_child_failure(self, node, retcode):
- self.note('* Child terminated with exit code {0}'.format(retcode))
+ self.note(f'* Child terminated with exit code {retcode}')
@cached_property
def OK(self):
diff --git a/celery/bin/upgrade.py b/celery/bin/upgrade.py
index 66eb27caaea..1518297172c 100644
--- a/celery/bin/upgrade.py
+++ b/celery/bin/upgrade.py
@@ -30,7 +30,7 @@ def _compat_key(key, namespace='CELERY'):
def _backup(filename, suffix='.orig'):
lines = []
backup_filename = ''.join([filename, suffix])
- print('writing backup to {0}...'.format(backup_filename),
+ print(f'writing backup to {backup_filename}...',
file=sys.stderr)
with codecs.open(filename, 'r', 'utf-8') as read_fh:
with codecs.open(backup_filename, 'w', 'utf-8') as backup_fh:
@@ -71,7 +71,7 @@ def settings(filename, django, compat, no_backup):
"""Migrate settings from Celery 3.x to Celery 4.x."""
lines = _slurp(filename)
keyfilter = _compat_key if django or compat else pass1
- print('processing {0}...'.format(filename), file=sys.stderr)
+ print(f'processing {filename}...', file=sys.stderr)
# gives list of tuples: ``(did_change, line_contents)``
new_lines = [
_to_new_key(line, keyfilter) for line in lines
diff --git a/celery/contrib/testing/mocks.py b/celery/contrib/testing/mocks.py
index 92afed361f7..6294e6905cb 100644
--- a/celery/contrib/testing/mocks.py
+++ b/celery/contrib/testing/mocks.py
@@ -5,10 +5,7 @@
try:
from case import Mock
except ImportError:
- try:
- from unittest.mock import Mock
- except ImportError:
- from mock import Mock
+ from unittest.mock import Mock
def TaskMessage(
diff --git a/celery/security/__init__.py b/celery/security/__init__.py
index 18b205d696f..316ec1db5c1 100644
--- a/celery/security/__init__.py
+++ b/celery/security/__init__.py
@@ -64,8 +64,8 @@ def setup_security(allowed_serializers=None, key=None, cert=None, store=None,
if not (key and cert and store):
raise ImproperlyConfigured(SECURITY_SETTING_MISSING)
- with open(key, 'r') as kf:
- with open(cert, 'r') as cf:
+ with open(key) as kf:
+ with open(cert) as cf:
register_auth(kf.read(), cf.read(), store, digest, serializer)
registry._set_default_serializer('auth')
diff --git a/t/benchmarks/bench_worker.py b/t/benchmarks/bench_worker.py
index 716094a5ed8..a2102b8bf19 100644
--- a/t/benchmarks/bench_worker.py
+++ b/t/benchmarks/bench_worker.py
@@ -55,7 +55,7 @@ def it(_, n):
elif i > n - 2:
total = tdiff(it.time_start)
print('({} so far: {}s)'.format(i, tdiff(it.subt)), file=sys.stderr)
- print('-- process {0} tasks: {1}s total, {2} tasks/s'.format(
+ print('-- process {} tasks: {}s total, {} tasks/s'.format(
n, total, n / (total + .0),
))
import os
diff --git a/t/unit/apps/test_multi.py b/t/unit/apps/test_multi.py
index f603c0f406d..f7de1d5e27f 100644
--- a/t/unit/apps/test_multi.py
+++ b/t/unit/apps/test_multi.py
@@ -116,7 +116,7 @@ def _args(name, *args):
return args + (
'--pidfile={}.pid'.format(os.path.join(os.path.normpath('/var/run/celery/'), name)),
'--logfile={}%I.log'.format(os.path.join(os.path.normpath('/var/log/celery/'), name)),
- '--executable={0}'.format(sys.executable),
+ f'--executable={sys.executable}',
'',
)
@@ -406,7 +406,7 @@ def test_getpids(self):
assert node_0.name == 'foo@e.com'
assert sorted(node_0.argv) == sorted([
'',
- '--executable={0}'.format(node_0.executable),
+ f'--executable={node_0.executable}',
'--logfile={}'.format(os.path.normpath('/var/log/celery/foo%I.log')),
'--pidfile={}'.format(os.path.normpath('/var/run/celery/foo.pid')),
'-m celery worker --detach',
@@ -417,7 +417,7 @@ def test_getpids(self):
assert node_1.name == 'bar@e.com'
assert sorted(node_1.argv) == sorted([
'',
- '--executable={0}'.format(node_1.executable),
+ f'--executable={node_1.executable}',
'--logfile={}'.format(os.path.normpath('/var/log/celery/bar%I.log')),
'--pidfile={}'.format(os.path.normpath('/var/run/celery/bar.pid')),
'-m celery worker --detach',
diff --git a/t/unit/backends/test_asynchronous.py b/t/unit/backends/test_asynchronous.py
index bfc20a63265..75ba90baa97 100644
--- a/t/unit/backends/test_asynchronous.py
+++ b/t/unit/backends/test_asynchronous.py
@@ -20,7 +20,7 @@ def setup_eventlet():
os.environ.update(EVENTLET_NO_GREENDNS='yes')
-class DrainerTests(object):
+class DrainerTests:
"""
Base test class for the Default / Gevent / Eventlet drainers.
"""
diff --git a/t/unit/backends/test_cassandra.py b/t/unit/backends/test_cassandra.py
index 3f218ddc115..3e648bff0ed 100644
--- a/t/unit/backends/test_cassandra.py
+++ b/t/unit/backends/test_cassandra.py
@@ -149,7 +149,7 @@ def __init__(self, *args, **kwargs):
def execute(self, *args, **kwargs):
raise OTOExc()
- class DummyCluster(object):
+ class DummyCluster:
def __init__(self, *args, **kwargs):
pass
@@ -170,7 +170,7 @@ def test_init_session(self):
# Tests behavior when Cluster.connect works properly
from celery.backends import cassandra as mod
- class DummyCluster(object):
+ class DummyCluster:
def __init__(self, *args, **kwargs):
pass
diff --git a/t/unit/backends/test_mongodb.py b/t/unit/backends/test_mongodb.py
index a67411f6121..6bd498e373e 100644
--- a/t/unit/backends/test_mongodb.py
+++ b/t/unit/backends/test_mongodb.py
@@ -568,7 +568,7 @@ def test_encode_decode(self, mongo_backend_factory, serializer,
assert decoded == 12
-class _MyTestClass(object):
+class _MyTestClass:
def __init__(self, a):
self.a = a
@@ -632,7 +632,7 @@ def fake_mongo_collection_patch(self, monkeypatch):
"""A fake collection with serialization experience close to MongoDB."""
bson = pytest.importorskip("bson")
- class FakeMongoCollection(object):
+ class FakeMongoCollection:
def __init__(self):
self.data = {}
From 661f6b14c40b225eb838cf012070b5f9cd9ed306 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 17:36:37 +0300
Subject: [PATCH 044/157] Complete release notes.
---
Changelog.rst | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/Changelog.rst b/Changelog.rst
index 0a37098c9f0..a8fc6d47665 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -11,16 +11,20 @@ an overview of what's new in Celery 5.0.
5.0.0
=====
-:release-date: N/A
+:release-date: 2020-09-24 6.00 P.M UTC+3:00
:release-by: Omer Katz
+- **Breaking Change** Remove AMQP result backend (#6360).
+- Warn when deprecated settings are used (#6353).
+- Expose retry_policy for Redis result backend (#6330).
+- Prepare Celery to support the yet to be released Python 3.9 (#6328).
5.0.0rc3
========
:release-date: 2020-09-07 4.00 P.M UTC+3:00
:release-by: Omer Katz
-- More cleanups of leftover Python 2 support. (#6338)
+- More cleanups of leftover Python 2 support (#6338).
5.0.0rc2
========
@@ -45,7 +49,7 @@ an overview of what's new in Celery 5.0.
:release-by: Omer Katz
- **Breaking Change** Drop support for the Riak result backend (#5686).
-- **Breaking Change** pytest plugin is no longer enabled by default. (#6288)
+- **Breaking Change** pytest plugin is no longer enabled by default (#6288).
Install pytest-celery to enable it.
- **Breaking Change** Brand new CLI based on Click (#5718).
From a99a9034c44dba06a43a3ed06ecf7949072dcc5f Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 17:44:01 +0300
Subject: [PATCH 045/157] Bump version: 5.0.0rc3 → 5.0.0
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.bumpversion.cfg | 2 +-
README.rst | 6 +++---
celery/__init__.py | 2 +-
docs/includes/introduction.txt | 2 +-
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 62e8b476da7..80aca1abc6f 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 5.0.0rc3
+current_version = 5.0.0
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?P<releaselevel>[a-z\d]+)?
diff --git a/README.rst b/README.rst
index ed2392cab52..cb2d07c42f9 100644
--- a/README.rst
+++ b/README.rst
@@ -2,7 +2,7 @@
|build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge|
-:Version: 5.0.0rc3 (singularity)
+:Version: 5.0.0 (singularity)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
@@ -57,7 +57,7 @@ in such a way that the client enqueues an URL to be requested by a worker.
What do I need?
===============
-Celery version 5.0.0rc3 runs on,
+Celery version 5.0.0 runs on,
- Python (3.6, 3.7, 3.8)
- PyPy3.6 (7.6)
@@ -89,7 +89,7 @@ Get Started
===========
If this is the first time you're trying to use Celery, or you're
-new to Celery 5.0.0rc3 coming from previous versions then you should read our
+new to Celery 5.0.0 coming from previous versions then you should read our
getting started tutorials:
- `First steps with Celery`_
diff --git a/celery/__init__.py b/celery/__init__.py
index 1413244a3a3..9ccaae8874d 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -17,7 +17,7 @@
SERIES = 'singularity'
-__version__ = '5.0.0rc3'
+__version__ = '5.0.0'
__author__ = 'Ask Solem'
__contact__ = 'auvipy@gmail.com'
__homepage__ = 'http://celeryproject.org'
diff --git a/docs/includes/introduction.txt b/docs/includes/introduction.txt
index ec4ccbbaf45..0ba1f965b3f 100644
--- a/docs/includes/introduction.txt
+++ b/docs/includes/introduction.txt
@@ -1,4 +1,4 @@
-:Version: 5.0.0rc3 (cliffs)
+:Version: 5.0.0 (cliffs)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
From 5e05400dbe6054659b8818fd1ae56a7610a7f741 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 24 Sep 2020 17:56:58 +0300
Subject: [PATCH 046/157] Happify linters.
---
celery/bin/graph.py | 2 +-
t/integration/test_backend.py | 4 ++--
t/unit/backends/test_mongodb.py | 4 ++--
t/unit/contrib/test_migrate.py | 26 +++++++++++++-------------
t/unit/utils/test_sysinfo.py | 8 ++++----
5 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/celery/bin/graph.py b/celery/bin/graph.py
index 4a1b005120e..3013077b4b5 100644
--- a/celery/bin/graph.py
+++ b/celery/bin/graph.py
@@ -43,7 +43,7 @@ def maybe_list(l, sep=','):
def generic_label(node):
return '{} ({}://)'.format(type(node).__name__,
- node._label.split('://')[0])
+ node._label.split('://')[0])
class Node:
force_label = None
diff --git a/t/integration/test_backend.py b/t/integration/test_backend.py
index 6355b3cb6e6..67816322a17 100644
--- a/t/integration/test_backend.py
+++ b/t/integration/test_backend.py
@@ -9,8 +9,8 @@
@pytest.mark.skipif(
- not os.environ.get('AZUREBLOCKBLOB_URL'),
- reason='Environment variable AZUREBLOCKBLOB_URL required'
+ not os.environ.get('AZUREBLOCKBLOB_URL'),
+ reason='Environment variable AZUREBLOCKBLOB_URL required'
)
class test_AzureBlockBlobBackend:
def test_crud(self, manager):
diff --git a/t/unit/backends/test_mongodb.py b/t/unit/backends/test_mongodb.py
index 6bd498e373e..fb304b7e369 100644
--- a/t/unit/backends/test_mongodb.py
+++ b/t/unit/backends/test_mongodb.py
@@ -178,7 +178,7 @@ def test_ensure_mongodb_uri_compliance(self):
assert compliant_uri('mongodb://') == 'mongodb://localhost'
assert compliant_uri('mongodb+something://host') == \
- 'mongodb+something://host'
+ 'mongodb+something://host'
assert compliant_uri('something://host') == 'mongodb+something://host'
@@ -647,7 +647,7 @@ def find_one(self, task_id):
@pytest.mark.parametrize("serializer,result_type,result", [
(s, type(i['result']), i['result']) for i in SUCCESS_RESULT_TEST_DATA
for s in i['serializers']]
- )
+ )
def test_encode_success_results(self, mongo_backend_factory, serializer,
result_type, result):
backend = mongo_backend_factory(serializer=serializer)
diff --git a/t/unit/contrib/test_migrate.py b/t/unit/contrib/test_migrate.py
index 6754e536a6c..e36e2f32751 100644
--- a/t/unit/contrib/test_migrate.py
+++ b/t/unit/contrib/test_migrate.py
@@ -23,19 +23,19 @@ def Message(body, exchange='exchange', routing_key='rkey',
compression=None, content_type='application/json',
content_encoding='utf-8'):
return Mock(
- body=body,
- delivery_info={
- 'exchange': exchange,
- 'routing_key': routing_key,
- },
- headers={
- 'compression': compression,
- },
- content_type=content_type,
- content_encoding=content_encoding,
- properties={
- 'correlation_id': isinstance(body, dict) and body['id'] or None
- }
+ body=body,
+ delivery_info={
+ 'exchange': exchange,
+ 'routing_key': routing_key,
+ },
+ headers={
+ 'compression': compression,
+ },
+ content_type=content_type,
+ content_encoding=content_encoding,
+ properties={
+ 'correlation_id': isinstance(body, dict) and body['id'] or None
+ }
)
diff --git a/t/unit/utils/test_sysinfo.py b/t/unit/utils/test_sysinfo.py
index f892788a446..25c8ff5f886 100644
--- a/t/unit/utils/test_sysinfo.py
+++ b/t/unit/utils/test_sysinfo.py
@@ -12,8 +12,8 @@
@pytest.mark.skipif(
- not hasattr(os, 'getloadavg'),
- reason='Function os.getloadavg is not defined'
+ not hasattr(os, 'getloadavg'),
+ reason='Function os.getloadavg is not defined'
)
def test_load_average(patching):
getloadavg = patching('os.getloadavg')
@@ -24,8 +24,8 @@ def test_load_average(patching):
@pytest.mark.skipif(
- not hasattr(posix, 'statvfs_result'),
- reason='Function posix.statvfs_result is not defined'
+ not hasattr(posix, 'statvfs_result'),
+ reason='Function posix.statvfs_result is not defined'
)
def test_df():
x = df('/')
From ea37db1410c83271e06d78a564983cba3732a1b1 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 21 Sep 2020 13:36:19 +0300
Subject: [PATCH 047/157] Specify utf-8 as the encoding for log files.
Fixes #5144.
---
celery/app/log.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/app/log.py b/celery/app/log.py
index d27a85ee559..7e036746cc0 100644
--- a/celery/app/log.py
+++ b/celery/app/log.py
@@ -226,7 +226,7 @@ def _detect_handler(self, logfile=None):
logfile = sys.__stderr__ if logfile is None else logfile
if hasattr(logfile, 'write'):
return logging.StreamHandler(logfile)
- return WatchedFileHandler(logfile)
+ return WatchedFileHandler(logfile, encoding='utf-8')
def _has_handler(self, logger):
return any(
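The one-line change above matters whenever task arguments or results contain non-ASCII text: without an explicit encoding, WatchedFileHandler falls back to the platform's default encoding (e.g. cp1252 on Windows), and writing a unicode payload can raise UnicodeEncodeError. A minimal sketch of the fixed behaviour, using a hypothetical log path:

    import logging
    from logging.handlers import WatchedFileHandler

    # Hypothetical log path; any writable location works.
    handler = WatchedFileHandler('/tmp/celery-demo.log', encoding='utf-8')
    logger = logging.getLogger('celery-demo')
    logger.addHandler(handler)

    # Safe regardless of the platform's default encoding.
    logger.warning('task payload: %s', 'héllo wörld')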
From cd8782feca5d961d08a1a46925e495b120dc3241 Mon Sep 17 00:00:00 2001
From: Akash Agrawal
Date: Wed, 30 Sep 2020 09:50:20 +0530
Subject: [PATCH 048/157] Fixed some typos in readme
---
README.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.rst b/README.rst
index cb2d07c42f9..3896f32a6fa 100644
--- a/README.rst
+++ b/README.rst
@@ -63,14 +63,14 @@ Celery version 5.0.0 runs on,
- PyPy3.6 (7.6)
-This is the next version to of celery which will support Python 3.6 or newer.
+This is the next version of celery which will support Python 3.6 or newer.
If you're running an older version of Python, you need to be running
an older version of Celery:
- Python 2.6: Celery series 3.1 or earlier.
- Python 2.5: Celery series 3.0 or earlier.
-- Python 2.4 was Celery series 2.2 or earlier.
+- Python 2.4: Celery series 2.2 or earlier.
- Python 2.7: Celery 4.x series.
Celery is a project with minimal funding,
From 0f1a53b84ab15e15bb257c4d9ce2b3459d2ed176 Mon Sep 17 00:00:00 2001
From: Michal Kuffa
Date: Tue, 29 Sep 2020 15:49:12 +0200
Subject: [PATCH 049/157] Fix custom headers propagation for protocol 1 hybrid
messages
---
celery/worker/strategy.py | 1 +
t/unit/worker/test_request.py | 4 ++--
t/unit/worker/test_strategy.py | 6 +++++-
3 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/celery/worker/strategy.py b/celery/worker/strategy.py
index 64d3c5337f2..8fb1eabd319 100644
--- a/celery/worker/strategy.py
+++ b/celery/worker/strategy.py
@@ -50,6 +50,7 @@ def hybrid_to_proto2(message, body):
'kwargsrepr': body.get('kwargsrepr'),
'origin': body.get('origin'),
}
+ headers.update(message.headers or {})
embed = {
'callbacks': body.get('callbacks'),
diff --git a/t/unit/worker/test_request.py b/t/unit/worker/test_request.py
index 039af717b2d..3ed7c553d15 100644
--- a/t/unit/worker/test_request.py
+++ b/t/unit/worker/test_request.py
@@ -1204,8 +1204,8 @@ def test_execute_using_pool_with_none_timelimit_header(self):
def test_execute_using_pool__defaults_of_hybrid_to_proto2(self):
weakref_ref = Mock(name='weakref.ref')
- headers = strategy.hybrid_to_proto2('', {'id': uuid(),
- 'task': self.mytask.name})[1]
+ headers = strategy.hybrid_to_proto2(Mock(headers=None), {'id': uuid(),
+ 'task': self.mytask.name})[1]
job = self.zRequest(revoked_tasks=set(), ref=weakref_ref, **headers)
job.execute_using_pool(self.pool)
assert job._apply_result
diff --git a/t/unit/worker/test_strategy.py b/t/unit/worker/test_strategy.py
index 6b93dab74d9..88abe4dcd27 100644
--- a/t/unit/worker/test_strategy.py
+++ b/t/unit/worker/test_strategy.py
@@ -271,7 +271,7 @@ def failed():
class test_hybrid_to_proto2:
def setup(self):
- self.message = Mock(name='message')
+ self.message = Mock(name='message', headers={"custom": "header"})
self.body = {
'args': (1,),
'kwargs': {'foo': 'baz'},
@@ -288,3 +288,7 @@ def test_retries_custom_value(self):
self.body['retries'] = _custom_value
_, headers, _, _ = hybrid_to_proto2(self.message, self.body)
assert headers.get('retries') == _custom_value
+
+ def test_custom_headers(self):
+ _, headers, _, _ = hybrid_to_proto2(self.message, self.body)
+ assert headers.get("custom") == "header"
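The essence of the fix is the merge order in hybrid_to_proto2: the headers derived from the protocol 1 body are built first, and any headers already present on the transport message are then layered on top, so custom headers are no longer dropped. A standalone sketch of that merge (field names trimmed down from the patched function):

    def merge_headers(body, message_headers):
        # Headers derived from the protocol 1 body.
        headers = {
            'task': body.get('task'),
            'id': body.get('id'),
            'retries': body.get('retries', 0),
        }
        # Layer the message's own headers on top; ``or {}`` guards
        # against messages that carry no headers at all.
        headers.update(message_headers or {})
        return headers

    merged = merge_headers({'task': 'add', 'id': '42'}, {'custom': 'header'})
    assert merged['custom'] == 'header'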
From 6d270b94642188be774e228f97fd6af89ac547af Mon Sep 17 00:00:00 2001
From: Frazer McLean
Date: Wed, 30 Sep 2020 11:00:56 +0200
Subject: [PATCH 050/157] Retry after race during schema creation in database
backend (#6298)
* Retry after race during schema creation in database backend
Fixes #6296
This race condition does not commonly occur, since the schema creation
only needs to happen once per database. It's more likely to appear in,
e.g., a test suite that uses a new database each time.
For context on the sleep times I chose: the schema creation takes ~50 ms
on my laptop.
I did a simulated test run of 50 concurrent calls to MetaData.create_all
repeated 200 times and the number of retries was:
- 0 retries: 8717x
- 1 retry: 1279x
- 2 retries: 4x
* Add test for prepare_models retry error condition
* Add name to contributors
---
CONTRIBUTORS.txt | 1 +
celery/backends/database/session.py | 27 ++++++++++++++++++++++++++-
t/unit/backends/test_database.py | 28 +++++++++++++++++++++++++++-
3 files changed, 54 insertions(+), 2 deletions(-)
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index 748cabf4d0b..a29157e1e57 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -277,3 +277,4 @@ Kyle Johnson, 2019/09/23
Dipankar Achinta, 2019/10/24
Sardorbek Imomaliev, 2020/01/24
Maksym Shalenyi, 2020/07/30
+Frazer McLean, 2020/09/29
diff --git a/celery/backends/database/session.py b/celery/backends/database/session.py
index 047a9271d92..ca3d683bea6 100644
--- a/celery/backends/database/session.py
+++ b/celery/backends/database/session.py
@@ -1,14 +1,21 @@
"""SQLAlchemy session."""
+import time
+
from kombu.utils.compat import register_after_fork
from sqlalchemy import create_engine
+from sqlalchemy.exc import DatabaseError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
+from celery.utils.time import get_exponential_backoff_interval
+
ResultModelBase = declarative_base()
__all__ = ('SessionManager',)
+PREPARE_MODELS_MAX_RETRIES = 10
+
def _after_fork_cleanup_session(session):
session._after_fork()
@@ -50,7 +57,25 @@ def create_session(self, dburi, short_lived_sessions=False, **kwargs):
def prepare_models(self, engine):
if not self.prepared:
- ResultModelBase.metadata.create_all(engine)
+ # SQLAlchemy will check if the items exist before trying to
+ # create them, which is a race condition. If it raises an error
+ # in one iteration, the next may pass all the existence checks
+ # and the call will succeed.
+ retries = 0
+ while True:
+ try:
+ ResultModelBase.metadata.create_all(engine)
+ except DatabaseError:
+ if retries < PREPARE_MODELS_MAX_RETRIES:
+ sleep_amount_ms = get_exponential_backoff_interval(
+ 10, retries, 1000, True
+ )
+ time.sleep(sleep_amount_ms / 1000)
+ retries += 1
+ else:
+ raise
+ else:
+ break
self.prepared = True
def session_factory(self, dburi, **kwargs):
diff --git a/t/unit/backends/test_database.py b/t/unit/backends/test_database.py
index bff42361841..28e2fedbbbb 100644
--- a/t/unit/backends/test_database.py
+++ b/t/unit/backends/test_database.py
@@ -13,7 +13,8 @@
from celery.backends.database import (DatabaseBackend, retry, session, # noqa
session_cleanup)
from celery.backends.database.models import Task, TaskSet # noqa
-from celery.backends.database.session import SessionManager # noqa
+from celery.backends.database.session import ( # noqa
+ PREPARE_MODELS_MAX_RETRIES, ResultModelBase, SessionManager)
from t import skip # noqa
@@ -398,3 +399,28 @@ def test_coverage_madness(self):
SessionManager()
finally:
session.register_after_fork = prev
+
+ @patch('celery.backends.database.session.create_engine')
+ def test_prepare_models_terminates(self, create_engine):
+ """SessionManager.prepare_models has retry logic because the creation
+ of database tables by multiple workers is racy. This test patches
+ the used method to always raise, so we can verify that it does
+ eventually terminate.
+ """
+ from sqlalchemy.dialects.sqlite import dialect
+ from sqlalchemy.exc import DatabaseError
+
+ sqlite = dialect.dbapi()
+ manager = SessionManager()
+ engine = manager.get_engine('dburi')
+
+ def raise_err(bind):
+ raise DatabaseError("", "", [], sqlite.DatabaseError)
+
+ patch_create_all = patch.object(
+ ResultModelBase.metadata, 'create_all', side_effect=raise_err)
+
+ with pytest.raises(DatabaseError), patch_create_all as mock_create_all:
+ manager.prepare_models(engine)
+
+ assert mock_create_all.call_count == PREPARE_MODELS_MAX_RETRIES + 1
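The retry loop added above follows a general pattern: try, back off exponentially on failure, give up after a bounded number of attempts. A self-contained sketch of the same shape, with a stand-in for celery.utils.time.get_exponential_backoff_interval:

    import random
    import time

    MAX_RETRIES = 10

    def backoff_ms(factor, retries, maximum, jitter=True):
        # Stand-in for get_exponential_backoff_interval:
        # factor * 2**retries, capped at ``maximum``, optionally jittered.
        countdown = min(maximum, factor * (2 ** retries))
        return random.randrange(countdown + 1) if jitter else countdown

    def create_schema_with_retry(create_all):
        retries = 0
        while True:
            try:
                create_all()
            except Exception:
                if retries < MAX_RETRIES:
                    time.sleep(backoff_ms(10, retries, 1000) / 1000)
                    retries += 1
                else:
                    raise
            else:
                break

    create_schema_with_retry(lambda: None)  # succeeds on the first try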
From 96ec6db611f86f44a99f58d107c484dc011110ce Mon Sep 17 00:00:00 2001
From: Maarten Fonville
Date: Fri, 25 Sep 2020 23:38:56 +0200
Subject: [PATCH 051/157] Update daemonizing.rst
Fix daemonizing documentation for issue #6363 to put `multi` before `-A`
---
docs/userguide/daemonizing.rst | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/docs/userguide/daemonizing.rst b/docs/userguide/daemonizing.rst
index 07e39009c97..225d078ac8e 100644
--- a/docs/userguide/daemonizing.rst
+++ b/docs/userguide/daemonizing.rst
@@ -72,11 +72,11 @@ the worker you must also export them (e.g., :command:`export DISPLAY=":0"`)
.. code-block:: console
- $ celery -A proj multi start worker1 \
+ $ celery multi -A proj start worker1 \
--pidfile="$HOME/run/celery/%n.pid" \
--logfile="$HOME/log/celery/%n%I.log"
- $ celery -A proj multi restart worker1 \
+ $ celery multi -A proj restart worker1 \
--logfile="$HOME/log/celery/%n%I.log" \
--pidfile="$HOME/run/celery/%n.pid
@@ -399,12 +399,12 @@ This is an example systemd file:
Group=celery
EnvironmentFile=/etc/conf.d/celery
WorkingDirectory=/opt/celery
- ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi start ${CELERYD_NODES} \
+ ExecStart=/bin/sh -c '${CELERY_BIN} multi -A ${CELERY_APP} start ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE} \
--logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE}'
- ExecReload=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi restart ${CELERYD_NODES} \
+ ExecReload=/bin/sh -c '${CELERY_BIN} multi -A ${CELERY_APP} restart ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE} \
--logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
@@ -492,7 +492,7 @@ This is an example systemd file for Celery Beat:
Group=celery
EnvironmentFile=/etc/conf.d/celery
WorkingDirectory=/opt/celery
- ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
+ ExecStart=/bin/sh -c '${CELERY_BIN} beat -A ${CELERY_APP} \
--pidfile=${CELERYBEAT_PID_FILE} \
--logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
From f05e82a32a737c4222ece0b446e7fb2fd8cc883f Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 30 Sep 2020 14:07:02 +0300
Subject: [PATCH 052/157] Revert "Update daemonizing.rst" (#6376)
This reverts commit 96ec6db611f86f44a99f58d107c484dc011110ce.
---
docs/userguide/daemonizing.rst | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/docs/userguide/daemonizing.rst b/docs/userguide/daemonizing.rst
index 225d078ac8e..07e39009c97 100644
--- a/docs/userguide/daemonizing.rst
+++ b/docs/userguide/daemonizing.rst
@@ -72,11 +72,11 @@ the worker you must also export them (e.g., :command:`export DISPLAY=":0"`)
.. code-block:: console
- $ celery multi -A proj start worker1 \
+ $ celery -A proj multi start worker1 \
--pidfile="$HOME/run/celery/%n.pid" \
--logfile="$HOME/log/celery/%n%I.log"
- $ celery multi -A proj restart worker1 \
+ $ celery -A proj multi restart worker1 \
--logfile="$HOME/log/celery/%n%I.log" \
--pidfile="$HOME/run/celery/%n.pid
@@ -399,12 +399,12 @@ This is an example systemd file:
Group=celery
EnvironmentFile=/etc/conf.d/celery
WorkingDirectory=/opt/celery
- ExecStart=/bin/sh -c '${CELERY_BIN} multi -A ${CELERY_APP} start ${CELERYD_NODES} \
+ ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi start ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE} \
--logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE}'
- ExecReload=/bin/sh -c '${CELERY_BIN} multi -A ${CELERY_APP} restart ${CELERYD_NODES} \
+ ExecReload=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi restart ${CELERYD_NODES} \
--pidfile=${CELERYD_PID_FILE} \
--logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
@@ -492,7 +492,7 @@ This is an example systemd file for Celery Beat:
Group=celery
EnvironmentFile=/etc/conf.d/celery
WorkingDirectory=/opt/celery
- ExecStart=/bin/sh -c '${CELERY_BIN} beat -A ${CELERY_APP} \
+ ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
--pidfile=${CELERYBEAT_PID_FILE} \
--logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
From ce4f759a5766331285c779ed87b724a755d18b74 Mon Sep 17 00:00:00 2001
From: laixintao
Date: Wed, 30 Sep 2020 21:36:09 +0800
Subject: [PATCH 053/157] bugfix: chord.get hangs when result_expires is set
 to 0. (#6373)
* bugfix: chord.get hangs when result_expires is set to 0.
`EXPIRE key 0` deletes the key in Redis, so the chord never gets its
result.
fix: https://github.com/celery/celery/issues/5237
* test: add a test case for expiry when the setting is zero.
---
celery/backends/redis.py | 2 +-
t/unit/backends/test_redis.py | 18 ++++++++++++++++++
2 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
index 1b9db7433fe..2c428823538 100644
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -436,7 +436,7 @@ def on_chord_part_return(self, request, state, result,
if self._chord_zset
else pipe.rpush(jkey, encoded).llen(jkey)
).get(tkey)
- if self.expires is not None:
+ if self.expires:
pipeline = pipeline \
.expire(jkey, self.expires) \
.expire(tkey, self.expires)
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
index 3f6257c8ae7..2029edc3c29 100644
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -712,6 +712,24 @@ def test_on_chord_part_return_no_expiry(self, restore):
self.b.expires = old_expires
+ @patch('celery.result.GroupResult.restore')
+ def test_on_chord_part_return_expire_set_to_zero(self, restore):
+ old_expires = self.b.expires
+ self.b.expires = 0
+ tasks = [self.create_task(i) for i in range(10)]
+
+ for i in range(10):
+ self.b.on_chord_part_return(tasks[i].request, states.SUCCESS, i)
+ assert self.b.client.zadd.call_count
+ self.b.client.zadd.reset_mock()
+ assert self.b.client.zrangebyscore.call_count
+ jkey = self.b.get_key_for_group('group_id', '.j')
+ tkey = self.b.get_key_for_group('group_id', '.t')
+ self.b.client.delete.assert_has_calls([call(jkey), call(tkey)])
+ self.b.client.expire.assert_not_called()
+
+ self.b.expires = old_expires
+
@patch('celery.result.GroupResult.restore')
def test_on_chord_part_return_no_expiry__unordered(self, restore):
self.app.conf.result_backend_transport_options = dict(
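The heart of the fix is a truthiness check: in Redis, `EXPIRE key 0` deletes the key immediately, so an expiry of 0 must be treated like "no expiry" rather than passed through to the pipeline. A toy sketch of the guard, exercised against a fake pipeline object:

    class FakePipe:
        def __init__(self):
            self.calls = []

        def expire(self, key, ttl):
            self.calls.append(('EXPIRE', key, ttl))

    def maybe_expire(pipe, key, expires):
        # ``if expires:`` skips both None and 0 -- sending EXPIRE 0
        # would delete the key and the chord would never see its result.
        if expires:
            pipe.expire(key, expires)

    pipe = FakePipe()
    maybe_expire(pipe, 'chord-key', 0)     # no EXPIRE issued
    maybe_expire(pipe, 'chord-key', 3600)  # EXPIRE issued
    assert pipe.calls == [('EXPIRE', 'chord-key', 3600)]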
From 431fffd7f29824cc08d566ed40bf398579979820 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 30 Sep 2020 15:16:20 +0300
Subject: [PATCH 054/157] Display a custom error message whenever an attempt is
 made to use -A or --app as a sub-command option.
Fixes #6363
---
celery/bin/base.py | 5 ++---
celery/bin/celery.py | 30 +++++++++++++++++++++++++++++-
2 files changed, 31 insertions(+), 4 deletions(-)
diff --git a/celery/bin/base.py b/celery/bin/base.py
index 9429900a957..662ba728ae9 100644
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -39,8 +39,7 @@ def __init__(self, app, no_color, workdir, quiet=False):
@cached_property
def OK(self):
- return self.style("OK", fg="green", bold=True) \
-
+ return self.style("OK", fg="green", bold=True)
@cached_property
def ERROR(self):
@@ -72,7 +71,7 @@ def error(self, message=None, **kwargs):
kwargs['color'] = False
click.echo(message, **kwargs)
else:
- click.echo(message, **kwargs)
+ click.secho(message, **kwargs)
def pretty(self, n):
if isinstance(n, list):
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
index 4f7c95d065c..9f4fa0cbe4c 100644
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -2,6 +2,7 @@
import os
import click
+import click.exceptions
from click.types import ParamType
from click_didyoumean import DYMGroup
@@ -104,7 +105,8 @@ def celery(ctx, app, broker, result_backend, loader, config, workdir,
os.environ['CELERY_RESULT_BACKEND'] = result_backend
if config:
os.environ['CELERY_CONFIG_MODULE'] = config
- ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir, quiet=quiet)
+ ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir,
+ quiet=quiet)
# User options
worker.params.extend(ctx.obj.app.user_options.get('worker', []))
@@ -139,6 +141,32 @@ def report(ctx):
celery.add_command(shell)
celery.add_command(multi)
+# Monkey-patch click to display a custom error
+# when -A or --app are used as sub-command options instead of as options
+# of the global command.
+
+previous_show_implementation = click.exceptions.NoSuchOption.show
+
+WRONG_APP_OPTION_USAGE_MESSAGE = """You are using `{option_name}` as an option of the {info_name} sub-command:
+celery {info_name} {option_name} celeryapp <...>
+
+The support for this usage was removed in Celery 5.0. Instead you should use `{option_name}` as a global option:
+celery {option_name} celeryapp {info_name} <...>"""
+
+
+def _show(self, file=None):
+ if self.option_name in ('-A', '--app'):
+ self.ctx.obj.error(
+ WRONG_APP_OPTION_USAGE_MESSAGE.format(
+ option_name=self.option_name,
+ info_name=self.ctx.info_name),
+ fg='red'
+ )
+ previous_show_implementation(self, file=file)
+
+
+click.exceptions.NoSuchOption.show = _show
+
def main() -> int:
"""Start celery umbrella command.
From c41a5cfe363e6359aebcce553f02d11803e0ead0 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 1 Oct 2020 12:28:50 +0300
Subject: [PATCH 055/157] Remove test dependencies for Python 2.7.
---
requirements/test.txt | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/requirements/test.txt b/requirements/test.txt
index fd0ba172f90..8d338510e71 100644
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -1,10 +1,8 @@
case>=1.3.1
-pytest~=4.6; python_version < '3.0'
-pytest~=6.0; python_version >= '3.0'
+pytest~=6.0
pytest-celery
pytest-timeout~=1.4.2
boto3>=1.9.178
-python-dateutil<2.8.1,>=2.1; python_version < '3.0'
moto==1.3.7
pre-commit
-r extras/yaml.txt
From 86e0d933ecaf588fee1903708c413edb3188dd24 Mon Sep 17 00:00:00 2001
From: Nicolas Dandrimont
Date: Thu, 1 Oct 2020 15:27:31 +0200
Subject: [PATCH 056/157] Restore the celery worker
--without-{gossip,mingle,heartbeat} flags (#6365)
In the previously used argparse arguments framework, these three options were
used as flags.
Since 5.0.0, they are options that need to take an argument (whose only
sensible value would be "true"). The resulting error message is also very
hard to understand when the celery worker command is run with an odd number
of flags:
Error: Unable to parse extra configuration from command line.
Reason: not enough values to unpack (expected 2, got 1)
When the celery worker is run with an even number of flags, the last one is
treated as an argument of the previous one, which is a subtle bug.
---
celery/bin/worker.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index 4d4c57aea16..834a01bdae2 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -231,15 +231,15 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
cls=CeleryOption,
help_group="Queue Options")
@click.option('--without-gossip',
- default=False,
+ is_flag=True,
cls=CeleryOption,
help_group="Features")
@click.option('--without-mingle',
- default=False,
+ is_flag=True,
cls=CeleryOption,
help_group="Features")
@click.option('--without-heartbeat',
- default=False,
+ is_flag=True,
cls=CeleryOption,
help_group="Features", )
@click.option('--heartbeat-interval',
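The distinction matters because `default=False` alone still leaves the option expecting a value (`--without-gossip true`), whereas `is_flag=True` makes the bare option itself flip the value. A minimal sketch, independent of Celery's CeleryOption class:

    import click

    @click.command()
    @click.option('--without-gossip', is_flag=True)
    def worker(without_gossip):
        click.echo(f'gossip disabled: {without_gossip}')

    if __name__ == '__main__':
        worker()  # `prog --without-gossip` now parses without an argument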
From 8767df022a9175db3520c0dfb5d3d562711383f2 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 1 Oct 2020 14:23:31 +0300
Subject: [PATCH 057/157] Provide clearer error messages when app fails to
load.
---
celery/bin/celery.py | 31 +++++++++++++++++++++++++++++--
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
index 9f4fa0cbe4c..5488d17c40e 100644
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -1,5 +1,6 @@
"""Celery Command Line Interface."""
import os
+import traceback
import click
import click.exceptions
@@ -25,6 +26,19 @@
from celery.bin.upgrade import upgrade
from celery.bin.worker import worker
+UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND = click.style("""
+Unable to load celery application.
+The module {0} was not found.""", fg='red')
+
+UNABLE_TO_LOAD_APP_ERROR_OCCURRED = click.style("""
+Unable to load celery application.
+While trying to load the module {0} the following error occurred:
+{1}""", fg='red')
+
+UNABLE_TO_LOAD_APP_APP_MISSING = click.style("""
+Unable to load celery application.
+{0}""")
+
class App(ParamType):
"""Application option."""
@@ -34,8 +48,21 @@ class App(ParamType):
def convert(self, value, param, ctx):
try:
return find_app(value)
- except (ModuleNotFoundError, AttributeError) as e:
- self.fail(str(e))
+ except ModuleNotFoundError as e:
+ if e.name != value:
+ exc = traceback.format_exc()
+ self.fail(
+ UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)
+ )
+ self.fail(UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND.format(e.name))
+ except AttributeError as e:
+ attribute_name = e.args[0].capitalize()
+ self.fail(UNABLE_TO_LOAD_APP_APP_MISSING.format(attribute_name))
+ except Exception:
+ exc = traceback.format_exc()
+ self.fail(
+ UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)
+ )
APP = App()
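The key trick in the patched converter is inspecting ModuleNotFoundError.name: when it differs from the module the user asked for, the user's module was found but failed while importing something else, which deserves a full traceback rather than a bare "module not found". A standalone sketch of that distinction:

    import traceback

    def describe_import_failure(value):
        try:
            __import__(value)
        except ModuleNotFoundError as e:
            if e.name != value:
                # ``value`` exists but imports a module that doesn't.
                return f'error inside {value}:\n{traceback.format_exc()}'
            return f'module {e.name} not found'
        return 'ok'

    print(describe_import_failure('definitely_missing_module'))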
From 7288147d65a32b726869ed887d99e4bfd8c070e2 Mon Sep 17 00:00:00 2001
From: Thomas Grainger
Date: Sun, 4 Oct 2020 14:02:47 +0100
Subject: [PATCH 058/157] fix pytest plugin registration documentation (#6387)
* fix pytest plugin registration documentation
* Update docs/userguide/testing.rst
Co-authored-by: Thomas Grainger
Co-authored-by: Omer Katz
---
docs/userguide/testing.rst | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/docs/userguide/testing.rst b/docs/userguide/testing.rst
index 4deccd0f15c..330a24d1dc2 100644
--- a/docs/userguide/testing.rst
+++ b/docs/userguide/testing.rst
@@ -105,7 +105,8 @@ Celery initially ships the plugin in a disabled state, to enable it you can eith
* `pip install celery[pytest]`
* `pip install pytest-celery`
- * or add `pytest_plugins = 'celery.contrib.pytest'` to your pytest.ini
+ * or add an environment variable `PYTEST_PLUGINS=celery.contrib.pytest`
+ * or add `pytest_plugins = ("celery.contrib.pytest", )` to your root conftest.py
Marks
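For completeness, the conftest.py variant from the corrected docs is a one-line module-level assignment; a minimal root conftest.py would be:

    # conftest.py at the project root -- enables the celery pytest plugin
    pytest_plugins = ("celery.contrib.pytest",)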
From 243d475b199f13ac2aa85d4225abd3a094ae781f Mon Sep 17 00:00:00 2001
From: Bas ten Berge
Date: Mon, 5 Oct 2020 07:09:29 +0200
Subject: [PATCH 059/157] Contains a workaround for the capitalized
configuration issue (#6385)
* Contains a workaround for the capitalized configuration issue
* Update celery/apps/worker.py
Co-authored-by: Omer Katz
* Update celery/apps/worker.py
Co-authored-by: Omer Katz
Co-authored-by: Omer Katz
---
celery/apps/worker.py | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/celery/apps/worker.py b/celery/apps/worker.py
index 6c1b5eb1c20..2a9df0c2e79 100644
--- a/celery/apps/worker.py
+++ b/celery/apps/worker.py
@@ -141,12 +141,25 @@ def on_start(self):
app.log.redirect_stdouts(self.redirect_stdouts_level)
# TODO: Remove the following code in Celery 6.0
- if app.conf.maybe_warn_deprecated_settings():
- logger.warning(
- "Please run `celery upgrade settings path/to/settings.py` "
- "to avoid these warnings and to allow a smoother upgrade "
- "to Celery 6.0."
- )
+ # This qualifies as a hack for issue #6366.
+ # a hack via app.__reduce_keys__(), but that may not work properly in
+ # all cases
+ warn_deprecated = True
+ config_source = app._config_source
+ if isinstance(config_source, str):
+ # Don't raise the warning when the settings originate from
+ # django.conf:settings
+ warn_deprecated = config_source.lower() not in [
+ 'django.conf:settings',
+ ]
+
+ if warn_deprecated:
+ if app.conf.maybe_warn_deprecated_settings():
+ logger.warning(
+ "Please run `celery upgrade settings path/to/settings.py` "
+ "to avoid these warnings and to allow a smoother upgrade "
+ "to Celery 6.0."
+ )
def emit_banner(self):
# Dump configuration to screen so we have some basic information
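The workaround boils down to checking where the configuration came from before warning, since Django settings are upper-cased by convention and would otherwise trigger false positives. A reduced sketch of the guard:

    def should_warn(config_source):
        # Settings originating from django.conf are expected to be
        # upper-case, so the deprecation warning does not apply there.
        if isinstance(config_source, str):
            return config_source.lower() not in ['django.conf:settings']
        return True

    assert should_warn('django.conf:settings') is False
    assert should_warn('myproj.celeryconfig') is True
    assert should_warn(None) is True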
From 9eac689aa904e88b8327122629538980cd4ef6c9 Mon Sep 17 00:00:00 2001
From: Zvi Baratz
Date: Mon, 5 Oct 2020 18:09:18 +0300
Subject: [PATCH 060/157] Remove old explanation regarding `absolute_import`
(#6390)
Resolves #6389.
---
docs/django/first-steps-with-django.rst | 11 ++---------
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/docs/django/first-steps-with-django.rst b/docs/django/first-steps-with-django.rst
index 003edcc8b06..956d965313b 100644
--- a/docs/django/first-steps-with-django.rst
+++ b/docs/django/first-steps-with-django.rst
@@ -54,15 +54,8 @@ for simple projects you may use a single contained module that defines
both the app and tasks, like in the :ref:`tut-celery` tutorial.
Let's break down what happens in the first module,
-first we import absolute imports from the future, so that our
-``celery.py`` module won't clash with the library:
-
-.. code-block:: python
-
- from __future__ import absolute_import
-
-Then we set the default :envvar:`DJANGO_SETTINGS_MODULE` environment variable
-for the :program:`celery` command-line program:
+first, we set the default :envvar:`DJANGO_SETTINGS_MODULE` environment
+variable for the :program:`celery` command-line program:
.. code-block:: python
From 66d2ea51ca8dff22dc11e6fd6119a3beedd83b51 Mon Sep 17 00:00:00 2001
From: Zvi Baratz
Date: Tue, 6 Oct 2020 18:07:54 +0300
Subject: [PATCH 061/157] Update canvas.rst (#6392)
* Update canvas.rst
Tiny fixes.
* Update docs/userguide/canvas.rst
Co-authored-by: Omer Katz
Co-authored-by: Omer Katz
---
docs/userguide/canvas.rst | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/docs/userguide/canvas.rst b/docs/userguide/canvas.rst
index fdfcaf2719a..10240768435 100644
--- a/docs/userguide/canvas.rst
+++ b/docs/userguide/canvas.rst
@@ -959,11 +959,11 @@ Map & Starmap
-------------
:class:`~celery.map` and :class:`~celery.starmap` are built-in tasks
-that calls the task for every element in a sequence.
+that call the provided calling task for every element in a sequence.
-They differ from group in that
+They differ from :class:`~celery.group` in that:
-- only one task message is sent
+- only one task message is sent.
- the operation is sequential.
@@ -1013,7 +1013,7 @@ Chunks
------
Chunking lets you divide an iterable of work into pieces, so that if
-you have one million objects, you can create 10 tasks with hundred
+you have one million objects, you can create 10 tasks with a hundred
thousand objects each.
Some may worry that chunking your tasks results in a degradation
From 8af82d7ed8625250907af268e7696e43570b2ac6 Mon Sep 17 00:00:00 2001
From: Justinas Petuchovas
Date: Wed, 7 Oct 2020 15:01:39 +0300
Subject: [PATCH 062/157] Remove duplicate words from docs (#6398)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Remove the duplicate usage of “required” in documentation (specifically, `introduction.rst`).
---
docs/getting-started/introduction.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/getting-started/introduction.rst b/docs/getting-started/introduction.rst
index ea2162467ae..f55f448da79 100644
--- a/docs/getting-started/introduction.rst
+++ b/docs/getting-started/introduction.rst
@@ -45,7 +45,7 @@ What do I need?
- PyPy3.6 ❨7.3❩
Celery 4.x was the last version to support Python 2.7,
- Celery 5.x requires Python 3.6 or newer is required.
+ Celery 5.x requires Python 3.6 or newer.
If you're running an older version of Python, you need to be running
an older version of Celery:
From 08fb1d06cff06397f365c546032479b8d9925931 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 7 Oct 2020 16:06:25 +0300
Subject: [PATCH 063/157] Allow lowercase log levels. (#6396)
Fixes #6395.
---
celery/bin/base.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/celery/bin/base.py b/celery/bin/base.py
index 662ba728ae9..fbb56d84dbb 100644
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -220,6 +220,7 @@ def __init__(self):
super().__init__(('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'FATAL'))
def convert(self, value, param, ctx):
+ value = value.upper()
value = super().convert(value, param, ctx)
return mlevel(value)
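The fix is a one-line normalisation before delegating to click's Choice validation. The same idea in isolation (recent click releases also offer `Choice(..., case_sensitive=False)` as an alternative):

    import click

    class CaseInsensitiveChoice(click.Choice):
        def convert(self, value, param, ctx):
            # Normalise first so 'info' and 'INFO' both validate.
            return super().convert(value.upper(), param, ctx)

    choice = CaseInsensitiveChoice(['DEBUG', 'INFO', 'WARNING'])
    assert choice.convert('info', None, None) == 'INFO'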
From 844774b76b8e434d5b88349be41c7c578b6fa48e Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 7 Oct 2020 16:07:44 +0300
Subject: [PATCH 064/157] Detach now correctly passes options with more than
one word. (#6394)
When specifying options such as `-E`, the detached worker should receive the `--task-events` option.
Instead it got the `--task_events` option, which doesn't exist, so it silently failed.
This fixes #6362.
---
celery/bin/worker.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index 834a01bdae2..bf58dbea647 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -302,6 +302,7 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
executable = params.pop('executable')
argv = ['-m', 'celery', 'worker']
for arg, value in params.items():
+ arg = arg.replace("_", "-")
if isinstance(value, bool) and value:
argv.append(f'--{arg}')
else:
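Rebuilding argv for the detached process is a matter of mapping Python-style parameter names back to their CLI spellings. A reduced sketch of the loop above (False-valued flags are simply skipped here for brevity):

    def params_to_argv(params):
        argv = []
        for arg, value in params.items():
            arg = arg.replace('_', '-')  # e.g. task_events -> task-events
            if isinstance(value, bool):
                if value:
                    argv.append(f'--{arg}')
            else:
                argv.append(f'--{arg}={value}')
        return argv

    assert params_to_argv({'task_events': True, 'loglevel': 'INFO'}) == \
        ['--task-events', '--loglevel=INFO']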
From 8a92b7128bc921d4332fe01486accc243115aba8 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 7 Oct 2020 16:11:42 +0300
Subject: [PATCH 065/157] The celery multi command now works as expected.
(#6388)
---
celery/apps/multi.py | 26 +++++++++++++++++++++---
celery/bin/multi.py | 7 ++++++-
requirements/test-ci-default.txt | 2 +-
t/unit/apps/test_multi.py | 35 ++++++++++++++++----------------
4 files changed, 47 insertions(+), 23 deletions(-)
diff --git a/celery/apps/multi.py b/celery/apps/multi.py
index b82eee4c9b3..a1458e3bd63 100644
--- a/celery/apps/multi.py
+++ b/celery/apps/multi.py
@@ -150,7 +150,7 @@ def _setdefaultopt(self, d, alt, value):
pass
value = d.setdefault(alt[0], os.path.normpath(value))
dir_path = os.path.dirname(value)
- if not os.path.exists(dir_path):
+ if dir_path and not os.path.exists(dir_path):
os.makedirs(dir_path)
return value
@@ -160,10 +160,30 @@ def _prepare_expander(self):
self.name, shortname, hostname)
def _prepare_argv(self):
+ cmd = self.expander(self.cmd).split(' ')
+ i = cmd.index('celery') + 1
+
+ options = self.options.copy()
+ for opt, value in self.options.items():
+ if opt in (
+ '-A', '--app',
+ '-b', '--broker',
+ '--result-backend',
+ '--loader',
+ '--config',
+ '--workdir',
+ '-C', '--no-color',
+ '-q', '--quiet',
+ ):
+ cmd.insert(i, format_opt(opt, self.expander(value)))
+
+ options.pop(opt)
+
+ cmd = [' '.join(cmd)]
argv = tuple(
- [self.expander(self.cmd)] +
+ cmd +
[format_opt(opt, self.expander(value))
- for opt, value in self.options.items()] +
+ for opt, value in options.items()] +
[self.extra_args]
)
if self.append:
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index 3e999ab2ab5..12bb52b87d2 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -471,4 +471,9 @@ def DOWN(self):
def multi(ctx):
"""Start multiple worker instances."""
cmd = MultiTool(quiet=ctx.obj.quiet, no_color=ctx.obj.no_color)
- return cmd.execute_from_commandline([''] + ctx.args)
+ # In 4.x, celery multi ignores the global --app option.
+ # Since in 5.0 the --app option is global only we
+ # rearrange the arguments so that the MultiTool will parse them correctly.
+ args = sys.argv[1:]
+ args = args[args.index('multi'):] + args[:args.index('multi')]
+ return cmd.execute_from_commandline(args)
diff --git a/requirements/test-ci-default.txt b/requirements/test-ci-default.txt
index 953ed9aecc7..fdcf4684733 100644
--- a/requirements/test-ci-default.txt
+++ b/requirements/test-ci-default.txt
@@ -12,7 +12,7 @@
-r extras/thread.txt
-r extras/elasticsearch.txt
-r extras/couchdb.txt
--r extras/couchbase.txt
+#-r extras/couchbase.txt
-r extras/arangodb.txt
-r extras/consul.txt
-r extras/cosmosdbsql.txt
diff --git a/t/unit/apps/test_multi.py b/t/unit/apps/test_multi.py
index f7de1d5e27f..4c3fd9bfc1f 100644
--- a/t/unit/apps/test_multi.py
+++ b/t/unit/apps/test_multi.py
@@ -69,7 +69,7 @@ def test_parse(self, gethostname, mkdirs_mock):
'--', '.disable_rate_limits=1',
])
p.parse()
- it = multi_args(p, cmd='COMMAND', append='*AP*',
+ it = multi_args(p, cmd='celery multi', append='*AP*',
prefix='*P*', suffix='*S*')
nodes = list(it)
@@ -85,32 +85,32 @@ def assert_line_in(name, args):
assert_line_in(
'*P*jerry@*S*',
- ['COMMAND', '-n *P*jerry@*S*', '-Q bar',
+ ['celery multi', '-n *P*jerry@*S*', '-Q bar',
'-c 5', '--flag', '--logfile=/var/log/celery/foo',
'-- .disable_rate_limits=1', '*AP*'],
)
assert_line_in(
'*P*elaine@*S*',
- ['COMMAND', '-n *P*elaine@*S*', '-Q bar',
+ ['celery multi', '-n *P*elaine@*S*', '-Q bar',
'-c 5', '--flag', '--logfile=/var/log/celery/foo',
'-- .disable_rate_limits=1', '*AP*'],
)
assert_line_in(
'*P*kramer@*S*',
- ['COMMAND', '--loglevel=DEBUG', '-n *P*kramer@*S*',
+ ['celery multi', '--loglevel=DEBUG', '-n *P*kramer@*S*',
'-Q bar', '--flag', '--logfile=/var/log/celery/foo',
'-- .disable_rate_limits=1', '*AP*'],
)
expand = nodes[0].expander
assert expand('%h') == '*P*jerry@*S*'
assert expand('%n') == '*P*jerry'
- nodes2 = list(multi_args(p, cmd='COMMAND', append='',
+ nodes2 = list(multi_args(p, cmd='celery multi', append='',
prefix='*P*', suffix='*S*'))
assert nodes2[0].argv[-1] == '-- .disable_rate_limits=1'
p2 = NamespacedOptionParser(['10', '-c:1', '5'])
p2.parse()
- nodes3 = list(multi_args(p2, cmd='COMMAND'))
+ nodes3 = list(multi_args(p2, cmd='celery multi'))
def _args(name, *args):
return args + (
@@ -123,40 +123,40 @@ def _args(name, *args):
assert len(nodes3) == 10
assert nodes3[0].name == 'celery1@example.com'
assert nodes3[0].argv == (
- 'COMMAND', '-c 5', '-n celery1@example.com') + _args('celery1')
+ 'celery multi', '-c 5', '-n celery1@example.com') + _args('celery1')
for i, worker in enumerate(nodes3[1:]):
assert worker.name == 'celery%s@example.com' % (i + 2)
node_i = f'celery{i + 2}'
assert worker.argv == (
- 'COMMAND',
+ 'celery multi',
f'-n {node_i}@example.com') + _args(node_i)
- nodes4 = list(multi_args(p2, cmd='COMMAND', suffix='""'))
+ nodes4 = list(multi_args(p2, cmd='celery multi', suffix='""'))
assert len(nodes4) == 10
assert nodes4[0].name == 'celery1@'
assert nodes4[0].argv == (
- 'COMMAND', '-c 5', '-n celery1@') + _args('celery1')
+ 'celery multi', '-c 5', '-n celery1@') + _args('celery1')
p3 = NamespacedOptionParser(['foo@', '-c:foo', '5'])
p3.parse()
- nodes5 = list(multi_args(p3, cmd='COMMAND', suffix='""'))
+ nodes5 = list(multi_args(p3, cmd='celery multi', suffix='""'))
assert nodes5[0].name == 'foo@'
assert nodes5[0].argv == (
- 'COMMAND', '-c 5', '-n foo@') + _args('foo')
+ 'celery multi', '-c 5', '-n foo@') + _args('foo')
p4 = NamespacedOptionParser(['foo', '-Q:1', 'test'])
p4.parse()
- nodes6 = list(multi_args(p4, cmd='COMMAND', suffix='""'))
+ nodes6 = list(multi_args(p4, cmd='celery multi', suffix='""'))
assert nodes6[0].name == 'foo@'
assert nodes6[0].argv == (
- 'COMMAND', '-Q test', '-n foo@') + _args('foo')
+ 'celery multi', '-Q test', '-n foo@') + _args('foo')
p5 = NamespacedOptionParser(['foo@bar', '-Q:1', 'test'])
p5.parse()
- nodes7 = list(multi_args(p5, cmd='COMMAND', suffix='""'))
+ nodes7 = list(multi_args(p5, cmd='celery multi', suffix='""'))
assert nodes7[0].name == 'foo@bar'
assert nodes7[0].argv == (
- 'COMMAND', '-Q test', '-n foo@bar') + _args('foo')
+ 'celery multi', '-Q test', '-n foo@bar') + _args('foo')
p6 = NamespacedOptionParser(['foo@bar', '-Q:0', 'test'])
p6.parse()
@@ -192,8 +192,7 @@ def test_from_kwargs(self):
max_tasks_per_child=30, A='foo', Q='q1,q2', O='fair',
)
assert sorted(n.argv) == sorted([
- '-m celery worker --detach',
- '-A foo',
+ '-m celery -A foo worker --detach',
f'--executable={n.executable}',
'-O fair',
'-n foo@bar.com',
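The rearrangement itself is a plain rotation of the argument list around the literal 'multi' token, so the global options end up after the sub-command name, where the 4.x-era MultiTool expects them. In isolation:

    def rearrange_multi_argv(args):
        # ['-A', 'proj', 'multi', 'start', 'w1']
        #   -> ['multi', 'start', 'w1', '-A', 'proj']
        i = args.index('multi')
        return args[i:] + args[:i]

    assert rearrange_multi_argv(['-A', 'proj', 'multi', 'start', 'w1']) == \
        ['multi', 'start', 'w1', '-A', 'proj']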
From b81ac620800a914eba9e18825b86325709c59421 Mon Sep 17 00:00:00 2001
From: bastb
Date: Sun, 11 Oct 2020 16:17:41 +0200
Subject: [PATCH 066/157] Contains the previously missed change requested by @thedrow
---
celery/apps/worker.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/celery/apps/worker.py b/celery/apps/worker.py
index 2a9df0c2e79..882751fb8a9 100644
--- a/celery/apps/worker.py
+++ b/celery/apps/worker.py
@@ -142,8 +142,6 @@ def on_start(self):
# TODO: Remove the following code in Celery 6.0
# This qualifies as a hack for issue #6366.
- # a hack via app.__reduce_keys__(), but that may not work properly in
- # all cases
warn_deprecated = True
config_source = app._config_source
if isinstance(config_source, str):
From c508296ce64b54578d37a66bc8e34ccba667e2e2 Mon Sep 17 00:00:00 2001
From: Zvi Baratz
Date: Sat, 10 Oct 2020 12:45:13 +0300
Subject: [PATCH 067/157] Added some Celery configuration examples.
---
docs/django/first-steps-with-django.rst | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/docs/django/first-steps-with-django.rst b/docs/django/first-steps-with-django.rst
index 956d965313b..ce48203d66c 100644
--- a/docs/django/first-steps-with-django.rst
+++ b/docs/django/first-steps-with-django.rst
@@ -54,7 +54,7 @@ for simple projects you may use a single contained module that defines
both the app and tasks, like in the :ref:`tut-celery` tutorial.
Let's break down what happens in the first module,
-first, we set the default :envvar:`DJANGO_SETTINGS_MODULE` environment
+first, we set the default :envvar:`DJANGO_SETTINGS_MODULE` environment
variable for the :program:`celery` command-line program:
.. code-block:: python
@@ -90,6 +90,18 @@ setting becomes ``CELERY_BROKER_URL``. This also applies to the
workers settings, for instance, the :setting:`worker_concurrency`
setting becomes ``CELERY_WORKER_CONCURRENCY``.
+For example, a Django project's configuration file might include:
+
+.. code-block:: python
+ :caption: settings.py
+
+ ...
+
+ # Celery Configuration Options
+ CELERY_TIMEZONE = "Australia/Tasmania"
+ CELERY_TASK_TRACK_STARTED = True
+ CELERY_TASK_TIME_LIMIT = 30 * 60
+
You can pass the settings object directly instead, but using a string
is better since then the worker doesn't have to serialize the object.
The ``CELERY_`` namespace is also optional, but recommended (to
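The examples above rely on the namespace mechanism: with namespace='CELERY', the app reads timezone from CELERY_TIMEZONE, task_time_limit from CELERY_TASK_TIME_LIMIT, and so on. A minimal sketch of the wiring, assuming a configured Django project:

    from celery import Celery

    app = Celery('proj')
    # Reads CELERY_-prefixed settings from the Django settings module,
    # e.g. CELERY_TIMEZONE -> app.conf.timezone.
    app.config_from_object('django.conf:settings', namespace='CELERY')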
From 70cbed3e2f6992ecbbc9257f60a9d251a6dceaf6 Mon Sep 17 00:00:00 2001
From: Artem Bernatskyi
Date: Mon, 12 Oct 2020 16:37:23 +0300
Subject: [PATCH 068/157] Fixed loglevel info->INFO in docs
---
docs/django/first-steps-with-django.rst | 2 +-
docs/getting-started/next-steps.rst | 14 +++++++-------
docs/userguide/application.rst | 4 ++--
docs/userguide/calling.rst | 2 +-
docs/userguide/debugging.rst | 2 +-
docs/userguide/workers.rst | 10 +++++-----
examples/app/myapp.py | 6 +++---
examples/django/README.rst | 2 +-
examples/eventlet/README.rst | 2 +-
examples/periodic-tasks/myapp.py | 8 ++++----
examples/security/mysecureapp.py | 2 +-
11 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/docs/django/first-steps-with-django.rst b/docs/django/first-steps-with-django.rst
index ce48203d66c..55d64c990eb 100644
--- a/docs/django/first-steps-with-django.rst
+++ b/docs/django/first-steps-with-django.rst
@@ -247,7 +247,7 @@ development it is useful to be able to start a worker instance by using the
.. code-block:: console
- $ celery -A proj worker -l info
+ $ celery -A proj worker -l INFO
For a complete listing of the command-line options available,
use the help command:
diff --git a/docs/getting-started/next-steps.rst b/docs/getting-started/next-steps.rst
index 1cf0b35f714..2b66fd5ce04 100644
--- a/docs/getting-started/next-steps.rst
+++ b/docs/getting-started/next-steps.rst
@@ -74,7 +74,7 @@ The :program:`celery` program can be used to start the worker (you need to run t
.. code-block:: console
- $ celery -A proj worker -l info
+ $ celery -A proj worker -l INFO
When the worker starts you should see a banner and some messages::
@@ -152,7 +152,7 @@ start one or more workers in the background:
.. code-block:: console
- $ celery multi start w1 -A proj -l info
+ $ celery multi start w1 -A proj -l INFO
celery multi v4.0.0 (latentcall)
> Starting nodes...
> w1.halcyon.local: OK
@@ -161,7 +161,7 @@ You can restart it too:
.. code-block:: console
- $ celery multi restart w1 -A proj -l info
+ $ celery multi restart w1 -A proj -l INFO
celery multi v4.0.0 (latentcall)
> Stopping nodes...
> w1.halcyon.local: TERM -> 64024
@@ -176,7 +176,7 @@ or stop it:
.. code-block:: console
- $ celery multi stop w1 -A proj -l info
+ $ celery multi stop w1 -A proj -l INFO
The ``stop`` command is asynchronous so it won't wait for the
worker to shutdown. You'll probably want to use the ``stopwait`` command
@@ -185,7 +185,7 @@ before exiting:
.. code-block:: console
- $ celery multi stopwait w1 -A proj -l info
+ $ celery multi stopwait w1 -A proj -l INFO
.. note::
@@ -202,7 +202,7 @@ you're encouraged to put these in a dedicated directory:
$ mkdir -p /var/run/celery
$ mkdir -p /var/log/celery
- $ celery multi start w1 -A proj -l info --pidfile=/var/run/celery/%n.pid \
+ $ celery multi start w1 -A proj -l INFO --pidfile=/var/run/celery/%n.pid \
--logfile=/var/log/celery/%n%I.log
With the multi command you can start multiple workers, and there's a powerful
@@ -211,7 +211,7 @@ for example:
.. code-block:: console
- $ celery multi start 10 -A proj -l info -Q:1-3 images,video -Q:4,5 data \
+ $ celery multi start 10 -A proj -l INFO -Q:1-3 images,video -Q:4,5 data \
-Q default -L:4,5 debug
For more examples see the :mod:`~celery.bin.multi` module in the API
diff --git a/docs/userguide/application.rst b/docs/userguide/application.rst
index 1e6c4cf13ae..6ec6c7f8f89 100644
--- a/docs/userguide/application.rst
+++ b/docs/userguide/application.rst
@@ -257,7 +257,7 @@ You can then specify the configuration module to use via the environment:
.. code-block:: console
- $ CELERY_CONFIG_MODULE="celeryconfig.prod" celery worker -l info
+ $ CELERY_CONFIG_MODULE="celeryconfig.prod" celery worker -l INFO
.. _app-censored-config:
@@ -431,7 +431,7 @@ chain breaks:
.. code-block:: console
- $ CELERY_TRACE_APP=1 celery worker -l info
+ $ CELERY_TRACE_APP=1 celery worker -l INFO
.. topic:: Evolving the API
diff --git a/docs/userguide/calling.rst b/docs/userguide/calling.rst
index 04c7f9ba718..811820b44a1 100644
--- a/docs/userguide/calling.rst
+++ b/docs/userguide/calling.rst
@@ -692,7 +692,7 @@ the workers :option:`-Q ` argument:
.. code-block:: console
- $ celery -A proj worker -l info -Q celery,priority.high
+ $ celery -A proj worker -l INFO -Q celery,priority.high
.. seealso::
diff --git a/docs/userguide/debugging.rst b/docs/userguide/debugging.rst
index 4eeb539be36..690e2acb4bd 100644
--- a/docs/userguide/debugging.rst
+++ b/docs/userguide/debugging.rst
@@ -110,7 +110,7 @@ For example starting the worker with:
.. code-block:: console
- $ CELERY_RDBSIG=1 celery worker -l info
+ $ CELERY_RDBSIG=1 celery worker -l INFO
You can start an rdb session for any of the worker processes by executing:
diff --git a/docs/userguide/workers.rst b/docs/userguide/workers.rst
index 098d3005f68..aec8c9e5414 100644
--- a/docs/userguide/workers.rst
+++ b/docs/userguide/workers.rst
@@ -23,7 +23,7 @@ You can start the worker in the foreground by executing the command:
.. code-block:: console
- $ celery -A proj worker -l info
+ $ celery -A proj worker -l INFO
For a full list of available command-line options see
:mod:`~celery.bin.worker`, or simply do:
@@ -108,7 +108,7 @@ is by using `celery multi`:
.. code-block:: console
- $ celery multi start 1 -A proj -l info -c4 --pidfile=/var/run/celery/%n.pid
+ $ celery multi start 1 -A proj -l INFO -c4 --pidfile=/var/run/celery/%n.pid
$ celery multi restart 1 --pidfile=/var/run/celery/%n.pid
For production deployments you should be using init-scripts or a process
@@ -410,7 +410,7 @@ argument to :program:`celery worker`:
.. code-block:: console
- $ celery -A proj worker -l info --statedb=/var/run/celery/worker.state
+ $ celery -A proj worker -l INFO --statedb=/var/run/celery/worker.state
or if you use :program:`celery multi` you want to create one file per
worker instance so use the `%n` format to expand the current node
@@ -418,7 +418,7 @@ name:
.. code-block:: console
- celery multi start 2 -l info --statedb=/var/run/celery/%n.state
+ celery multi start 2 -l INFO --statedb=/var/run/celery/%n.state
See also :ref:`worker-files`
@@ -611,7 +611,7 @@ separated list of queues to the :option:`-Q ` option:
.. code-block:: console
- $ celery -A proj worker -l info -Q foo,bar,baz
+ $ celery -A proj worker -l INFO -Q foo,bar,baz
If the queue name is defined in :setting:`task_queues` it will use that
configuration, but if it's not defined in the list of queues Celery will
diff --git a/examples/app/myapp.py b/examples/app/myapp.py
index 3490a3940bd..7ee8727095a 100644
--- a/examples/app/myapp.py
+++ b/examples/app/myapp.py
@@ -2,7 +2,7 @@
Usage::
- (window1)$ python myapp.py worker -l info
+ (window1)$ python myapp.py worker -l INFO
(window2)$ python
>>> from myapp import add
@@ -13,13 +13,13 @@
You can also specify the app to use with the `celery` command,
using the `-A` / `--app` option::
- $ celery -A myapp worker -l info
+ $ celery -A myapp worker -l INFO
With the `-A myproj` argument the program will search for an app
instance in the module ``myproj``. You can also specify an explicit
name using the fully qualified form::
- $ celery -A myapp:app worker -l info
+ $ celery -A myapp:app worker -l INFO
"""
diff --git a/examples/django/README.rst b/examples/django/README.rst
index 0334ef7df04..80d7a13cadd 100644
--- a/examples/django/README.rst
+++ b/examples/django/README.rst
@@ -46,7 +46,7 @@ Starting the worker
.. code-block:: console
- $ celery -A proj worker -l info
+ $ celery -A proj worker -l INFO
Running a task
===================
diff --git a/examples/eventlet/README.rst b/examples/eventlet/README.rst
index 672ff6f1461..84a1856f314 100644
--- a/examples/eventlet/README.rst
+++ b/examples/eventlet/README.rst
@@ -18,7 +18,7 @@ Before you run any of the example tasks you need to start
the worker::
$ cd examples/eventlet
- $ celery worker -l info --concurrency=500 --pool=eventlet
+ $ celery worker -l INFO --concurrency=500 --pool=eventlet
As usual you need to have RabbitMQ running, see the Celery getting started
guide if you haven't installed it yet.
diff --git a/examples/periodic-tasks/myapp.py b/examples/periodic-tasks/myapp.py
index 166b9234146..b2e4f0b8045 100644
--- a/examples/periodic-tasks/myapp.py
+++ b/examples/periodic-tasks/myapp.py
@@ -3,10 +3,10 @@
Usage::
# The worker service reacts to messages by executing tasks.
- (window1)$ python myapp.py worker -l info
+ (window1)$ python myapp.py worker -l INFO
# The beat service sends messages at scheduled intervals.
- (window2)$ python myapp.py beat -l info
+ (window2)$ python myapp.py beat -l INFO
# XXX To diagnose problems use -l debug:
(window2)$ python myapp.py beat -l debug
@@ -18,13 +18,13 @@
You can also specify the app to use with the `celery` command,
using the `-A` / `--app` option::
- $ celery -A myapp worker -l info
+ $ celery -A myapp worker -l INFO
With the `-A myproj` argument the program will search for an app
instance in the module ``myproj``. You can also specify an explicit
name using the fully qualified form::
- $ celery -A myapp:app worker -l info
+ $ celery -A myapp:app worker -l INFO
"""
diff --git a/examples/security/mysecureapp.py b/examples/security/mysecureapp.py
index 9578fa62272..21061a890da 100644
--- a/examples/security/mysecureapp.py
+++ b/examples/security/mysecureapp.py
@@ -14,7 +14,7 @@
cd examples/security
- (window1)$ python mysecureapp.py worker -l info
+ (window1)$ python mysecureapp.py worker -l INFO
(window2)$ cd examples/security
(window2)$ python
From 06dfe27d4542860c01c870a41fc5fbb80e5c4e20 Mon Sep 17 00:00:00 2001
From: ZubAnt
Date: Wed, 7 Oct 2020 20:33:59 +0300
Subject: [PATCH 069/157] Return a list instead of a set in CommaSeparatedList
The `_broadcast` method of kombu's Mailbox does not support sets:
https://github.com/celery/kombu/blob/7b2578b19ba4b1989b722f6f6e7efee2a1a4d86a/kombu/pidbox.py#L319
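As a hedged illustration (using `celery.utils.text.str_to_list`, the helper that backs this param type), the behavioural difference is simply list vs. set:

```python
# Sketch: why the Click param type must return a list rather than a set.
# kombu's Mailbox._broadcast() expects an ordered, indexable destination
# argument, which a set cannot provide.
from celery.utils.text import str_to_list

destinations = str_to_list("worker1@host,worker2@host")
assert destinations == ["worker1@host", "worker2@host"]  # ordered list, safe to pass on
```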
---
celery/bin/base.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/bin/base.py b/celery/bin/base.py
index fbb56d84dbb..52f94382c65 100644
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -168,7 +168,7 @@ class CommaSeparatedList(ParamType):
name = "comma separated list"
def convert(self, value, param, ctx):
- return set(text.str_to_list(value))
+ return text.str_to_list(value)
class Json(ParamType):
From 735f1679047a1358254252edc5cbf2624c86aadc Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Tue, 13 Oct 2020 14:48:21 +0300
Subject: [PATCH 070/157] Rewrite detaching logic (#6401)
* Rewrite detaching logic.
* Ignore empty arguments.
* Ensure the SystemD services are up to date.
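The heart of the rewrite can be summarised with a small standalone sketch (a simplified mirror of the new detach path in `celery/bin/worker.py`, not the patch itself):

```python
import sys

def build_detached_argv(raw_args):
    """Rebuild argv for re-exec, minus the detach flags."""
    # Reuse the original command line instead of reconstructing it from
    # Click's parsed params; drop empty arguments along the way.
    argv = ['-m', 'celery'] + [arg for arg in raw_args if arg]
    for flag in ('--detach', '-D'):
        if flag in argv:
            argv.remove(flag)
    return argv

# e.g. `celery -A proj worker --detach` re-execs as `python -m celery -A proj worker`
print([sys.executable] + build_detached_argv(sys.argv[1:]))
```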
---
celery/apps/multi.py | 2 +-
celery/bin/worker.py | 53 +++++++++-------------
docs/userguide/daemonizing.rst | 75 ++++++++++++++++----------------
extra/systemd/celery.service | 10 ++---
extra/systemd/celerybeat.service | 6 +--
5 files changed, 68 insertions(+), 78 deletions(-)
diff --git a/celery/apps/multi.py b/celery/apps/multi.py
index a1458e3bd63..448c7cd6fbd 100644
--- a/celery/apps/multi.py
+++ b/celery/apps/multi.py
@@ -78,7 +78,7 @@ def __init__(self, args):
self.namespaces = defaultdict(lambda: OrderedDict())
def parse(self):
- rargs = list(self.args)
+ rargs = [arg for arg in self.args if arg]
pos = 0
while pos < len(rargs):
arg = rargs[pos]
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index bf58dbea647..0472fde4c4b 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -10,7 +10,8 @@
from celery import concurrency
from celery.bin.base import (COMMA_SEPARATED_LIST, LOG_LEVEL,
CeleryDaemonCommand, CeleryOption)
-from celery.platforms import EX_FAILURE, detached, maybe_drop_privileges
+from celery.platforms import (EX_FAILURE, EX_OK, detached,
+ maybe_drop_privileges)
from celery.utils.log import get_logger
from celery.utils.nodenames import default_nodename, host_format, node_format
@@ -99,6 +100,7 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
if executable is not None:
path = executable
os.execv(path, [path] + argv)
+ return EX_OK
except Exception: # pylint: disable=broad-except
if app is None:
from celery import current_app
@@ -107,7 +109,7 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
'ERROR', logfile, hostname=hostname)
logger.critical("Can't exec %r", ' '.join([path] + argv),
exc_info=True)
- return EX_FAILURE
+ return EX_FAILURE
@click.command(cls=CeleryDaemonCommand,
@@ -290,36 +292,23 @@ def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
"Unable to parse extra configuration from command line.\n"
f"Reason: {e}", ctx=ctx)
if kwargs.get('detach', False):
- params = ctx.params.copy()
- params.pop('detach')
- params.pop('logfile')
- params.pop('pidfile')
- params.pop('uid')
- params.pop('gid')
- umask = params.pop('umask')
- workdir = ctx.obj.workdir
- params.pop('hostname')
- executable = params.pop('executable')
- argv = ['-m', 'celery', 'worker']
- for arg, value in params.items():
- arg = arg.replace("_", "-")
- if isinstance(value, bool) and value:
- argv.append(f'--{arg}')
- else:
- if value is not None:
- argv.append(f'--{arg}')
- argv.append(str(value))
- return detach(sys.executable,
- argv,
- logfile=logfile,
- pidfile=pidfile,
- uid=uid, gid=gid,
- umask=umask,
- workdir=workdir,
- app=app,
- executable=executable,
- hostname=hostname)
- return
+ argv = ['-m', 'celery'] + sys.argv[1:]
+ if '--detach' in argv:
+ argv.remove('--detach')
+ if '-D' in argv:
+ argv.remove('-D')
+
+ return detach(sys.executable,
+ argv,
+ logfile=logfile,
+ pidfile=pidfile,
+ uid=uid, gid=gid,
+ umask=kwargs.get('umask', None),
+ workdir=kwargs.get('workdir', None),
+ app=app,
+ executable=kwargs.get('executable', None),
+ hostname=hostname)
+
maybe_drop_privileges(uid=uid, gid=gid)
worker = app.Worker(
hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
diff --git a/docs/userguide/daemonizing.rst b/docs/userguide/daemonizing.rst
index 07e39009c97..ae804f6c32e 100644
--- a/docs/userguide/daemonizing.rst
+++ b/docs/userguide/daemonizing.rst
@@ -389,27 +389,28 @@ This is an example systemd file:
.. code-block:: bash
- [Unit]
- Description=Celery Service
- After=network.target
-
- [Service]
- Type=forking
- User=celery
- Group=celery
- EnvironmentFile=/etc/conf.d/celery
- WorkingDirectory=/opt/celery
- ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi start ${CELERYD_NODES} \
- --pidfile=${CELERYD_PID_FILE} \
- --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
- ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait ${CELERYD_NODES} \
- --pidfile=${CELERYD_PID_FILE}'
- ExecReload=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} multi restart ${CELERYD_NODES} \
- --pidfile=${CELERYD_PID_FILE} \
- --logfile=${CELERYD_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL} ${CELERYD_OPTS}'
-
- [Install]
- WantedBy=multi-user.target
+ [Unit]
+ Description=Celery Service
+ After=network.target
+
+ [Service]
+ Type=forking
+ User=celery
+ Group=celery
+ EnvironmentFile=/etc/conf.d/celery
+ WorkingDirectory=/opt/celery
+ ExecStart=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi start $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
+ --loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS'
+ ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE} --loglevel="${CELERYD_LOG_LEVEL}"'
+ ExecReload=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi restart $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
+ --loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS'
+ Restart=always
+
+ [Install]
+ WantedBy=multi-user.target
Once you've put that file in :file:`/etc/systemd/system`, you should run
:command:`systemctl daemon-reload` in order that Systemd acknowledges that file.
@@ -482,22 +483,22 @@ This is an example systemd file for Celery Beat:
.. code-block:: bash
- [Unit]
- Description=Celery Beat Service
- After=network.target
-
- [Service]
- Type=simple
- User=celery
- Group=celery
- EnvironmentFile=/etc/conf.d/celery
- WorkingDirectory=/opt/celery
- ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
- --pidfile=${CELERYBEAT_PID_FILE} \
- --logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
-
- [Install]
- WantedBy=multi-user.target
+ [Unit]
+ Description=Celery Beat Service
+ After=network.target
+
+ [Service]
+ Type=simple
+ User=celery
+ Group=celery
+ EnvironmentFile=/etc/conf.d/celery
+ WorkingDirectory=/opt/celery
+ ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
+ --pidfile=${CELERYBEAT_PID_FILE} \
+ --logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
+
+ [Install]
+ WantedBy=multi-user.target
Running the worker with superuser privileges (root)
diff --git a/extra/systemd/celery.service b/extra/systemd/celery.service
index b1d6d03b723..2510fb83cb0 100644
--- a/extra/systemd/celery.service
+++ b/extra/systemd/celery.service
@@ -8,13 +8,13 @@ User=celery
Group=celery
EnvironmentFile=-/etc/conf.d/celery
WorkingDirectory=/opt/celery
-ExecStart=/bin/sh -c '${CELERY_BIN} multi start $CELERYD_NODES \
- -A $CELERY_APP --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
+ExecStart=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi start $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
--loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS'
ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait $CELERYD_NODES \
- --pidfile=${CELERYD_PID_FILE}'
-ExecReload=/bin/sh -c '${CELERY_BIN} multi restart $CELERYD_NODES \
- -A $CELERY_APP --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
+ --pidfile=${CELERYD_PID_FILE} --loglevel="${CELERYD_LOG_LEVEL}"'
+ExecReload=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi restart $CELERYD_NODES \
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
--loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS'
Restart=always
diff --git a/extra/systemd/celerybeat.service b/extra/systemd/celerybeat.service
index c8879612d19..8cb2ad3687e 100644
--- a/extra/systemd/celerybeat.service
+++ b/extra/systemd/celerybeat.service
@@ -6,10 +6,10 @@ After=network.target
Type=simple
User=celery
Group=celery
-EnvironmentFile=-/etc/conf.d/celery
+EnvironmentFile=/etc/conf.d/celery
WorkingDirectory=/opt/celery
-ExecStart=/bin/sh -c '${CELERY_BIN} beat \
- -A ${CELERY_APP} --pidfile=${CELERYBEAT_PID_FILE} \
+ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
+ --pidfile=${CELERYBEAT_PID_FILE} \
--logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
[Install]
From 9367d3615f99d6ec623dd235e7ec5387fe9926eb Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Tue, 13 Oct 2020 16:31:29 +1100
Subject: [PATCH 071/157] fix: Pass back real result for single task chains
When chains are delayed, they are first frozen as part of preparation
which causes the sub-tasks to also be frozen. Afterward, the final (0th
since we reverse the tasks/result order when freezing) result object
from the freezing process would be passed back to the caller. This
caused problems in signaling completion of groups contained in chains
because the group relies on a promise which is fulfilled by a barrier
linked to each of its applied subtasks. Because two `GroupResult`
objects were constructed (one during freezing, one when the chain
sub-tasks were applied), there ended up being two promises, only one of
which would actually be fulfilled by the group subtasks.
This change ensures that in the special case where a chain has a single
task, we pass back the result object constructed when the task was
actually applied. When that single child is a group which does not get
unrolled (i.e. it contains more than one child itself), this ensures that we
pass back a `GroupResult` object which will actually be fulfilled. The
caller can then await the result confidently!
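As a hedged usage illustration (assuming a running worker and the integration suite's `identity` echo task), the guaranteed behaviour is:

```python
from celery import chain, group
from t.integration.tasks import identity  # echoes its argument

# A chain whose single child is a group that won't be unrolled.
sig = chain(group(identity.s(42), identity.s(42)))
res = sig.delay()
# `res` is the GroupResult actually fulfilled by the applied subtasks,
# not the frozen stand-in, so awaiting it completes instead of hanging.
assert res.get(timeout=10) == [42, 42]
```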
---
celery/canvas.py | 16 +++++++++----
t/integration/test_canvas.py | 18 +++++++++++++++
t/unit/tasks/test_canvas.py | 44 +++++++++++++++++++++++++++++++++++-
3 files changed, 73 insertions(+), 5 deletions(-)
diff --git a/celery/canvas.py b/celery/canvas.py
index 7871f7b395d..866c1c888b2 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -652,19 +652,27 @@ def run(self, args=None, kwargs=None, group_id=None, chord=None,
args = (tuple(args) + tuple(self.args)
if args and not self.immutable else self.args)
- tasks, results = self.prepare_steps(
+ tasks, results_from_prepare = self.prepare_steps(
args, kwargs, self.tasks, root_id, parent_id, link_error, app,
task_id, group_id, chord,
)
- if results:
+ if results_from_prepare:
if link:
tasks[0].extend_list_option('link', link)
first_task = tasks.pop()
options = _prepare_chain_from_options(options, tasks, use_link)
- first_task.apply_async(**options)
- return results[0]
+ result_from_apply = first_task.apply_async(**options)
+ # If we only have a single task, it may be important that we pass
+ # the real result object rather than the one obtained via freezing.
+ # e.g. For `GroupResult`s, we need to pass back the result object
+ # which will actually have its promise fulfilled by the subtasks,
+ # something that will never occur for the frozen result.
+ if not tasks:
+ return result_from_apply
+ else:
+ return results_from_prepare[0]
def freeze(self, _id=None, group_id=None, chord=None,
root_id=None, parent_id=None, group_index=None):
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index f5d19184a34..6b1f316b03e 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -413,6 +413,16 @@ def test_chain_of_a_chord_and_three_tasks_and_a_group(self, manager):
res = c()
assert res.get(timeout=TIMEOUT) == [8, 8]
+ def test_nested_chain_group_lone(self, manager):
+ """
+ Test that a lone group in a chain completes.
+ """
+ sig = chain(
+ group(identity.s(42), identity.s(42)), # [42, 42]
+ )
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
class test_result_set:
@@ -504,6 +514,14 @@ def test_large_group(self, manager):
assert res.get(timeout=TIMEOUT) == list(range(1000))
+ def test_group_lone(self, manager):
+ """
+ Test that a simple group completes.
+ """
+ sig = group(identity.s(42), identity.s(42)) # [42, 42]
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
def assert_ids(r, expected_value, expected_root_id, expected_parent_id):
root_id, parent_id, value = r.get(timeout=TIMEOUT)
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
index 53f98615e8e..c15dec83d60 100644
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -1,5 +1,5 @@
import json
-from unittest.mock import MagicMock, Mock
+from unittest.mock import MagicMock, Mock, patch
import pytest
@@ -535,6 +535,48 @@ def test_append_to_empty_chain(self):
assert x.apply().get() == 3
+ @pytest.mark.usefixtures('depends_on_current_app')
+ def test_chain_single_child_result(self):
+ child_sig = self.add.si(1, 1)
+ chain_sig = chain(child_sig)
+ assert chain_sig.tasks[0] is child_sig
+
+ with patch.object(
+ # We want to get back the result of actually applying the task
+ child_sig, "apply_async",
+ ) as mock_apply, patch.object(
+ # The child signature may be cloned by `chain.prepare_steps()`
+ child_sig, "clone", return_value=child_sig,
+ ):
+ res = chain_sig()
+ # `_prepare_chain_from_options()` sets this `chain` kwarg with the
+ # subsequent tasks which would be run - nothing in this case
+ mock_apply.assert_called_once_with(chain=[])
+ assert res is mock_apply.return_value
+
+ @pytest.mark.usefixtures('depends_on_current_app')
+ def test_chain_single_child_group_result(self):
+ child_sig = self.add.si(1, 1)
+ # The group will `clone()` the child during instantiation so mock it
+ with patch.object(child_sig, "clone", return_value=child_sig):
+ group_sig = group(child_sig)
+ # Now we can construct the chain signature which is actually under test
+ chain_sig = chain(group_sig)
+ assert chain_sig.tasks[0].tasks[0] is child_sig
+
+ with patch.object(
+ # We want to get back the result of actually applying the task
+ child_sig, "apply_async",
+ ) as mock_apply, patch.object(
+ # The child signature may be cloned by `chain.prepare_steps()`
+ child_sig, "clone", return_value=child_sig,
+ ):
+ res = chain_sig()
+ # `_prepare_chain_from_options()` sets this `chain` kwarg with the
+ # subsequent tasks which would be run - nothing in this case
+ mock_apply.assert_called_once_with(chain=[])
+ assert res is mock_apply.return_value
+
class test_group(CanvasCase):
From f1dbf3f05fb047c4d57c61b4ccaa0dcedd16e193 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Fri, 18 Sep 2020 14:23:40 +1000
Subject: [PATCH 072/157] fix: Retain `group_id` when tasks get re-frozen
When a group task which is part of a chain was to be delayed by
`trace_task()`, it would be reconstructed from the serialized request.
Normally, this sets the `group_id` of encapsulated tasks to the ID of
the group being instantiated. However, in the specific situation of a
group that is the last task in a chain which contributes to the
completion of a chord, it is essential that the group ID of the top-most
group is used instead. This top-most group ID is used by the redis
backend to track the completions of "final elements" of a chord in the
`on_chord_part_return()` implementation. By overwriting the group ID
which was already set in the `options` dictionaries of the child tasks
being deserialized, the chord accounting done by the redis backend would
be made inaccurate and chords would never complete.
This change alters how options are overridden for signatures to ensure
that if a `group_id` has already been set, it cannot be overridden.
Since group ID should be generally opaque to users, this should not be
disruptive.
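A standalone sketch of the override rule (a simplified mirror of `Signature._merge()`, not the class itself):

```python
_IMMUTABLE_OPTIONS = {"group_id"}

def merge_options(current, incoming):
    # Incoming values win, except for immutable keys that are already set.
    return dict(current, **{
        k: v for k, v in incoming.items()
        if k not in _IMMUTABLE_OPTIONS or k not in current
    })

assert merge_options({}, {"group_id": "new"}) == {"group_id": "new"}
assert merge_options({"group_id": "old"}, {"group_id": "new"}) == {"group_id": "old"}
```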
---
celery/canvas.py | 23 +++++++++++++++++------
t/unit/tasks/test_canvas.py | 25 ++++++++++++++++++++++++-
2 files changed, 41 insertions(+), 7 deletions(-)
diff --git a/celery/canvas.py b/celery/canvas.py
index 866c1c888b2..f767de1ce0a 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -122,6 +122,9 @@ class Signature(dict):
TYPES = {}
_app = _type = None
+ # The following fields must not be changed during freezing/merging because
+ # to do so would disrupt completion of parent tasks
+ _IMMUTABLE_OPTIONS = {"group_id"}
@classmethod
def register_type(cls, name=None):
@@ -224,14 +227,22 @@ def apply_async(self, args=None, kwargs=None, route_name=None, **options):
def _merge(self, args=None, kwargs=None, options=None, force=False):
args = args if args else ()
kwargs = kwargs if kwargs else {}
- options = options if options else {}
+ if options is not None:
+ # We build a new options dictionary where values in `options`
+ # override values in `self.options` except for keys which are
+ # noted as being immutable (unrelated to signature immutability)
+ # implying that allowing their value to change would stall tasks
+ new_options = dict(self.options, **{
+ k: v for k, v in options.items()
+ if k not in self._IMMUTABLE_OPTIONS or k not in self.options
+ })
+ else:
+ new_options = self.options
if self.immutable and not force:
- return (self.args, self.kwargs,
- dict(self.options,
- **options) if options else self.options)
+ return (self.args, self.kwargs, new_options)
return (tuple(args) + tuple(self.args) if args else self.args,
dict(self.kwargs, **kwargs) if kwargs else self.kwargs,
- dict(self.options, **options) if options else self.options)
+ new_options)
def clone(self, args=None, kwargs=None, **opts):
"""Create a copy of this signature.
@@ -286,7 +297,7 @@ def freeze(self, _id=None, group_id=None, chord=None,
opts['parent_id'] = parent_id
if 'reply_to' not in opts:
opts['reply_to'] = self.app.oid
- if group_id:
+ if group_id and "group_id" not in opts:
opts['group_id'] = group_id
if chord:
opts['chord'] = chord
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
index c15dec83d60..b90321572f3 100644
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -1,5 +1,5 @@
import json
-from unittest.mock import MagicMock, Mock, patch
+from unittest.mock import MagicMock, Mock, patch, sentinel
import pytest
@@ -154,6 +154,29 @@ def test_merge_immutable(self):
assert kwargs == {'foo': 1}
assert options == {'task_id': 3}
+ def test_merge_options__none(self):
+ sig = self.add.si()
+ _, _, new_options = sig._merge()
+ assert new_options is sig.options
+ _, _, new_options = sig._merge(options=None)
+ assert new_options is sig.options
+
+ @pytest.mark.parametrize("immutable_sig", (True, False))
+ def test_merge_options__group_id(self, immutable_sig):
+ # This is to avoid testing the behaviour in `test_set_immutable()`
+ if immutable_sig:
+ sig = self.add.si()
+ else:
+ sig = self.add.s()
+ # If the signature has no group ID, it can be set
+ assert not sig.options
+ _, _, new_options = sig._merge(options={"group_id": sentinel.gid})
+ assert new_options == {"group_id": sentinel.gid}
+ # But if one is already set, the new one is silently ignored
+ sig.set(group_id=sentinel.old_gid)
+ _, _, new_options = sig._merge(options={"group_id": sentinel.new_gid})
+ assert new_options == {"group_id": sentinel.old_gid}
+
def test_set_immutable(self):
x = self.add.s(2, 2)
assert not x.immutable
From a7af4b28c4151fde2de70802a0ba2b10efc836d8 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Fri, 18 Sep 2020 14:38:46 +1000
Subject: [PATCH 073/157] fix: Count chord "final elements" correctly
This change amends the implementation of `chord.__length_hint__()` to
ensure that all child task types are correctly counted. Specifically:
* all sub-tasks of a group are counted recursively
* the final task of a chain is counted recursively
* the body of a chord is counted recursively
* all other simple signatures count as a single "final element"
There is also a deserialisation step if a `dict` is seen while counting
the final elements in a chord; however, this should become less important
with the merge of #6342, which ensures that tasks are recursively
deserialized by `.from_dict()`.
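The counting rules above can be expressed as a standalone sketch, with plain classes standing in for the canvas types (a simplified mirror of the new `__descend()` helper):

```python
from dataclasses import dataclass, field

@dataclass
class Sig:  # a simple signature
    pass

@dataclass
class Group(Sig):
    tasks: list = field(default_factory=list)

@dataclass
class Chain(Sig):
    tasks: list = field(default_factory=list)

@dataclass
class Chord(Sig):
    body: Sig = None

def descend(sig_obj):
    if isinstance(sig_obj, Group):
        return sum(descend(task) for task in sig_obj.tasks)  # all group members count
    if isinstance(sig_obj, Chain):
        return descend(sig_obj.tasks[-1])  # only the final chain element counts
    if isinstance(sig_obj, Chord):
        return descend(sig_obj.body)  # a child chord contributes via its body
    return 1  # any other simple signature is one "final element"

assert descend(Group([Sig()] * 42)) == 42
assert descend(Chain([Sig(), Group([Sig(), Sig()])])) == 2
assert descend(Chord(body=Group([Sig()] * 3))) == 3
```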
---
celery/canvas.py | 35 ++++---
t/integration/test_canvas.py | 22 +++++
t/unit/tasks/test_canvas.py | 181 +++++++++++++++++++++++++++++++++--
3 files changed, 218 insertions(+), 20 deletions(-)
diff --git a/celery/canvas.py b/celery/canvas.py
index f767de1ce0a..2150d0e872d 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1383,21 +1383,30 @@ def apply(self, args=None, kwargs=None,
args=(tasks.apply(args, kwargs).get(propagate=propagate),),
)
- def _traverse_tasks(self, tasks, value=None):
- stack = deque(tasks)
- while stack:
- task = stack.popleft()
- if isinstance(task, group):
- stack.extend(task.tasks)
- elif isinstance(task, _chain) and isinstance(task.tasks[-1], group):
- stack.extend(task.tasks[-1].tasks)
- else:
- yield task if value is None else value
+ @classmethod
+ def __descend(cls, sig_obj):
+ # Sometimes serialized signatures might make their way here
+ if not isinstance(sig_obj, Signature) and isinstance(sig_obj, dict):
+ sig_obj = Signature.from_dict(sig_obj)
+ if isinstance(sig_obj, group):
+ # Each task in a group counts toward this chord
+ subtasks = getattr(sig_obj.tasks, "tasks", sig_obj.tasks)
+ return sum(cls.__descend(task) for task in subtasks)
+ elif isinstance(sig_obj, _chain):
+ # The last element in a chain counts toward this chord
+ return cls.__descend(sig_obj.tasks[-1])
+ elif isinstance(sig_obj, chord):
+ # The child chord's body counts toward this chord
+ return cls.__descend(sig_obj.body)
+ elif isinstance(sig_obj, Signature):
+ # Each simple signature counts as 1 completion for this chord
+ return 1
+ # Any other types are assumed to be iterables of simple signatures
+ return len(sig_obj)
def __length_hint__(self):
- tasks = (self.tasks.tasks if isinstance(self.tasks, group)
- else self.tasks)
- return sum(self._traverse_tasks(tasks, 1))
+ tasks = getattr(self.tasks, "tasks", self.tasks)
+ return sum(self.__descend(task) for task in tasks)
def run(self, header, body, partial_args, app=None, interval=None,
countdown=1, max_retries=None, eager=False,
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index 6b1f316b03e..c7e00b196a7 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -1009,3 +1009,25 @@ def test_priority_chain(self, manager):
c = return_priority.signature(priority=3) | return_priority.signature(
priority=5)
assert c().get(timeout=TIMEOUT) == "Priority: 5"
+
+ def test_nested_chord_group_chain_group_tail(self, manager):
+ """
+ Sanity check that a deeply nested group is completed as expected.
+
+ Groups at the end of chains nested in chords have had issues, and this
+ simple test sanity-checks that such a task structure can be completed.
+ """
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chord(group(chain(
+ identity.s(42), # -> 42
+ group(
+ identity.s(), # -> 42
+ identity.s(), # -> 42
+ ), # [42, 42]
+ )), identity.s()) # [[42, 42]]
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [[42, 42]]
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
index b90321572f3..f51efab9389 100644
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -1,5 +1,5 @@
import json
-from unittest.mock import MagicMock, Mock, patch, sentinel
+from unittest.mock import MagicMock, Mock, call, patch, sentinel
import pytest
@@ -808,12 +808,179 @@ def test_app_fallback_to_current(self):
x = chord([t1], body=t1)
assert x.app is current_app
- def test_chord_size_with_groups(self):
- x = chord([
- self.add.s(2, 2) | group([self.add.si(2, 2), self.add.si(2, 2)]),
- self.add.s(2, 2) | group([self.add.si(2, 2), self.add.si(2, 2)]),
- ], body=self.add.si(2, 2))
- assert x.__length_hint__() == 4
+ def test_chord_size_simple(self):
+ sig = chord(self.add.s())
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_with_body(self):
+ sig = chord(self.add.s(), self.add.s())
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_explicit_group_single(self):
+ sig = chord(group(self.add.s()))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_explicit_group_many(self):
+ sig = chord(group([self.add.s()] * 42))
+ assert sig.__length_hint__() == 42
+
+ def test_chord_size_implicit_group_single(self):
+ sig = chord([self.add.s()])
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_implicit_group_many(self):
+ sig = chord([self.add.s()] * 42)
+ assert sig.__length_hint__() == 42
+
+ def test_chord_size_chain_single(self):
+ sig = chord(chain(self.add.s()))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_chain_many(self):
+ # Chains get flattened into the encapsulating chord so even though the
+ # chain would only count for 1, its tasks get pulled up into the chord's
+ # header and are counted as a bunch of simple signature objects
+ sig = chord(chain([self.add.s()] * 42))
+ assert sig.__length_hint__() == 42
+
+ def test_chord_size_nested_chain_chain_single(self):
+ sig = chord(chain(chain(self.add.s())))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_chain_chain_many(self):
+ # The outer chain will be pulled up into the chord but the lower one
+ # remains and will only count as a single final element
+ sig = chord(chain(chain([self.add.s()] * 42)))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_implicit_chain_single(self):
+ sig = chord([self.add.s()])
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_implicit_chain_many(self):
+ # This isn't a chain object so the `tasks` attribute can't be lifted
+ # into the chord - this isn't actually valid and would blow up if we
+ # tried to run it, but it sanity-checks our recursion
+ sig = chord([[self.add.s()] * 42])
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_implicit_chain_chain_single(self):
+ sig = chord([chain(self.add.s())])
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_implicit_chain_chain_many(self):
+ sig = chord([chain([self.add.s()] * 42)])
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_chord_body_simple(self):
+ sig = chord(chord(tuple(), self.add.s()))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_chord_body_implicit_group_single(self):
+ sig = chord(chord(tuple(), [self.add.s()]))
+ assert sig.__length_hint__() == 1
+
+ def test_chord_size_nested_chord_body_implicit_group_many(self):
+ sig = chord(chord(tuple(), [self.add.s()] * 42))
+ assert sig.__length_hint__() == 42
+
+ # Nested groups in a chain only affect the chord size if they are the last
+ # element in the chain - in that case each group element is counted
+ def test_chord_size_nested_group_chain_group_head_single(self):
+ x = chord(
+ group(
+ [group(self.add.s()) | self.add.s()] * 42
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 42
+
+ def test_chord_size_nested_group_chain_group_head_many(self):
+ x = chord(
+ group(
+ [group([self.add.s()] * 4) | self.add.s()] * 2
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 2
+
+ def test_chord_size_nested_group_chain_group_mid_single(self):
+ x = chord(
+ group(
+ [self.add.s() | group(self.add.s()) | self.add.s()] * 42
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 42
+
+ def test_chord_size_nested_group_chain_group_mid_many(self):
+ x = chord(
+ group(
+ [self.add.s() | group([self.add.s()] * 4) | self.add.s()] * 2
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 2
+
+ def test_chord_size_nested_group_chain_group_tail_single(self):
+ x = chord(
+ group(
+ [self.add.s() | group(self.add.s())] * 42
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 42
+
+ def test_chord_size_nested_group_chain_group_tail_many(self):
+ x = chord(
+ group(
+ [self.add.s() | group([self.add.s()] * 4)] * 2
+ ),
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 4 * 2
+
+ def test_chord_size_nested_implicit_group_chain_group_tail_single(self):
+ x = chord(
+ [self.add.s() | group(self.add.s())] * 42,
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 42
+
+ def test_chord_size_nested_implicit_group_chain_group_tail_many(self):
+ x = chord(
+ [self.add.s() | group([self.add.s()] * 4)] * 2,
+ body=self.add.s()
+ )
+ assert x.__length_hint__() == 4 * 2
+
+ def test_chord_size_deserialized_element_single(self):
+ child_sig = self.add.s()
+ deserialized_child_sig = json.loads(json.dumps(child_sig))
+ # We have to break in to be sure that a child remains as a `dict` so we
+ # can confirm that the length hint will instantiate a `Signature`
+ # object and then descend as expected
+ chord_sig = chord(tuple())
+ chord_sig.tasks = [deserialized_child_sig]
+ with patch(
+ "celery.canvas.Signature.from_dict", return_value=child_sig
+ ) as mock_from_dict:
+ assert chord_sig.__length_hint__() == 1
+ mock_from_dict.assert_called_once_with(deserialized_child_sig)
+
+ def test_chord_size_deserialized_element_many(self):
+ child_sig = self.add.s()
+ deserialized_child_sig = json.loads(json.dumps(child_sig))
+ # We have to break in to be sure that a child remains as a `dict` so we
+ # can confirm that the length hint will instantiate a `Signature`
+ # object and then descend as expected
+ chord_sig = chord(tuple())
+ chord_sig.tasks = [deserialized_child_sig] * 42
+ with patch(
+ "celery.canvas.Signature.from_dict", return_value=child_sig
+ ) as mock_from_dict:
+ assert chord_sig.__length_hint__() == 42
+ mock_from_dict.assert_has_calls([call(deserialized_child_sig)] * 42)
def test_set_immutable(self):
x = chord([Mock(name='t1'), Mock(name='t2')], app=self.app)
From 53e032d28c504a0beda4a497c22f41bce5090594 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Tue, 13 Oct 2020 16:37:53 +1100
Subject: [PATCH 074/157] test: Add more integration tests for groups
These tests are intended to show that group unrolling should be
respected in various ways by all backends. They should make it clearer
what behaviour we should be expecting from nested canvas
components and ensure that all the implementations (mostly relevant to
chords and `on_chord_part_return()` code) behave sensibly.
---
t/integration/test_canvas.py | 73 +++++++++++++++++++++++++++++++++---
1 file changed, 67 insertions(+), 6 deletions(-)
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index c7e00b196a7..96b112af8f9 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -423,6 +423,34 @@ def test_nested_chain_group_lone(self, manager):
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [42, 42]
+ def test_nested_chain_group_mid(self, manager):
+ """
+ Test that a mid-point group in a chain completes.
+ """
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ identity.s(42), # 42
+ group(identity.s(), identity.s()), # [42, 42]
+ identity.s(), # [42, 42]
+ )
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
+ def test_nested_chain_group_last(self, manager):
+ """
+ Test that a final group in a chain with preceding tasks completes.
+ """
+ sig = chain(
+ identity.s(42), # 42
+ group(identity.s(), identity.s()), # [42, 42]
+ )
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
class test_result_set:
@@ -522,6 +550,16 @@ def test_group_lone(self, manager):
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [42, 42]
+ def test_nested_group_group(self, manager):
+ """
+ Confirm that groups nested inside groups get unrolled.
+ """
+ sig = group(
+ group(identity.s(42), identity.s(42)), # [42, 42]
+ ) # [42, 42] due to unrolling
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
def assert_ids(r, expected_value, expected_root_id, expected_parent_id):
root_id, parent_id, value = r.get(timeout=TIMEOUT)
@@ -1010,6 +1048,24 @@ def test_priority_chain(self, manager):
priority=5)
assert c().get(timeout=TIMEOUT) == "Priority: 5"
+ def test_nested_chord_group(self, manager):
+ """
+ Confirm that groups nested inside chords get unrolled.
+ """
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chord(
+ (
+ group(identity.s(42), identity.s(42)), # [42, 42]
+ ),
+ identity.s() # [42, 42]
+ )
+ res = sig.delay()
+ assert res.get(timeout=TIMEOUT) == [42, 42]
+
def test_nested_chord_group_chain_group_tail(self, manager):
"""
Sanity check that a deeply nested group is completed as expected.
@@ -1022,12 +1078,17 @@ def test_nested_chord_group_chain_group_tail(self, manager):
except NotImplementedError as e:
raise pytest.skip(e.args[0])
- sig = chord(group(chain(
- identity.s(42), # -> 42
+ sig = chord(
group(
- identity.s(), # -> 42
- identity.s(), # -> 42
- ), # [42, 42]
- )), identity.s()) # [[42, 42]]
+ chain(
+ identity.s(42), # 42
+ group(
+ identity.s(), # 42
+ identity.s(), # 42
+ ), # [42, 42]
+ ), # [42, 42]
+ ), # [[42, 42]] since the chain prevents unrolling
+ identity.s(), # [[42, 42]]
+ )
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [[42, 42]]
From ea2a803d57524db1edf0ecf81164e3c61fc8935b Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Tue, 13 Oct 2020 16:44:13 +1100
Subject: [PATCH 075/157] test: Fix old markings for chord tests
---
t/integration/test_canvas.py | 16 ++++++----------
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index 96b112af8f9..690acef352c 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -1,4 +1,3 @@
-import os
from datetime import datetime, timedelta
from time import sleep
@@ -6,7 +5,7 @@
from celery import chain, chord, group, signature
from celery.backends.base import BaseKeyValueStoreBackend
-from celery.exceptions import ChordError, TimeoutError
+from celery.exceptions import TimeoutError
from celery.result import AsyncResult, GroupResult, ResultSet
from .conftest import get_active_redis_channels, get_redis_connection
@@ -691,10 +690,12 @@ def test_eager_chord_inside_task(self, manager):
chord_add.app.conf.task_always_eager = prev
- @flaky
def test_group_chain(self, manager):
- if not manager.app.conf.result_backend.startswith('redis'):
- raise pytest.skip('Requires redis result backend.')
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
c = (
add.s(2, 2) |
group(add.s(i) for i in range(4)) |
@@ -703,11 +704,6 @@ def test_group_chain(self, manager):
res = c()
assert res.get(timeout=TIMEOUT) == [12, 13, 14, 15]
- @flaky
- @pytest.mark.xfail(os.environ['TEST_BACKEND'] == 'cache+pylibmc://',
- reason="Not supported yet by the cache backend.",
- strict=True,
- raises=ChordError)
def test_nested_group_chain(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
From 62f37133c8681584ae6f7b3499551b52ab369ea2 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Tue, 13 Oct 2020 16:23:44 +1100
Subject: [PATCH 076/157] fix: Make KV-store backends respect chord size
This avoids an issue where the `on_chord_part_return()` implementation
would check the length of the result of a chain ending in a nested
group. This would manifest in behaviour where a worker would be blocked
waiting for the result object it holds to complete, since it would
attempt to `.join()` the result object. In situations with plenty of
workers, this wouldn't really cause any noticeable issue apart from some
latency or unpredictable failures - but in concurrency-constrained
situations like the integration tests, it causes deadlocks.
We know from previous commits in this series that chord completion is
more complex than just waiting for a direct child, so we correct the
`size` value in `BaseKeyValueStoreBackend.on_chord_part_return()` to
respect the `chord_size` value from the request, falling back to the
length of the `deps` if that value is missing for some reason (this is
necessary to keep a number of the tests happy but it's not clear to me
if that will ever be the case in real life situations).
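In pseudocode, the corrected completion target looks like this (hedged, standalone illustration of why `len(deps)` is wrong when a chain ends in a nested group):

```python
# A chord header of one chain restores as a single dependency...
deps = ["chain-result"]       # len(deps) == 1
# ...but if that chain ends in a two-task group, two completions arrive.
chord_size_from_request = 2   # as computed by `__length_hint__()` at apply time

size = chord_size_from_request
if size is None:
    size = len(deps)          # fallback kept to satisfy existing tests
assert size == 2              # counter target now matches actual arrivals
```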
---
celery/backends/base.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/celery/backends/base.py b/celery/backends/base.py
index 28e5b2a4d6b..74fce23c3c4 100644
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -919,7 +919,11 @@ def on_chord_part_return(self, request, state, result, **kwargs):
ChordError(f'GroupResult {gid} no longer exists'),
)
val = self.incr(key)
- size = len(deps)
+ # Set the chord size to the value defined in the request, or fall back
+ # to the number of dependencies we can see from the restored result
+ size = request.chord.get("chord_size")
+ if size is None:
+ size = len(deps)
if val > size: # pragma: no cover
logger.warning('Chord counter incremented too many times for %r',
gid)
From beddbeef16f76b72cbfe89e7fd1a1375f7305dfa Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Wed, 14 Oct 2020 10:39:19 +1100
Subject: [PATCH 077/157] fix: Retain chord header result structure in Redis
This change fixes the chord result flattening issue which manifested
when using the Redis backend due to its deliberate throwing away of
information about the header result structure. Rather than assuming that
all results which contribute to the finalisation of a chord should be
siblings, this change checks if any are complex (i.e. `GroupResult`s) and
falls back to behaviour similar to that implemented in the
`KeyValueStoreBackend` which restores the original `GroupResult` object
and `join()`s it.
We retain the original behaviour which is billed as an optimisation in
f09b041. We could behave better in the complex header result case by not
bothering to stash the results of contributing tasks under the `.j` zset
since we won't be using them, but without checking for the presence of
the complex group result on every `on_chord_part_return()` call, we
can't be sure that we won't need those stashed results later on. This
would be an opportunity for optimisation in future if we were to use an
`EVAL` to only do the `zadd()` if the group result key doesn't exist.
However, avoiding the result encoding work in `on_chord_part_return()`
would be more complicated. For now, it's not worth the brainpower.
This change also slightly refactors the redis backend unit tests to make
it easier to build fixtures and hit both the complex and simple result
structure cases.
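As a hedged end-to-end illustration (assuming a running worker on the Redis backend and the integration suite's `identity` echo task), the structure now preserved matches the updated integration test:

```python
from celery import chain, chord, group
from t.integration.tasks import identity  # echoes its argument

sig = chord(
    group(chain(identity.s(42), group(identity.s(), identity.s()))),
    identity.s(),
)
# The body receives the nested [[42, 42]] rather than a flattened
# [42, 42]: the complex header GroupResult is saved, restored and
# `join()`ed instead of being read back as flat siblings.
assert sig.delay().get(timeout=10) == [[42, 42]]
```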
---
celery/backends/redis.py | 65 ++++++++----
t/integration/test_canvas.py | 13 ++-
t/unit/backends/test_redis.py | 193 ++++++++++++++++++++++++----------
3 files changed, 194 insertions(+), 77 deletions(-)
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
index 2c428823538..dd3677f569c 100644
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -13,6 +13,7 @@
from celery._state import task_join_will_block
from celery.canvas import maybe_signature
from celery.exceptions import ChordError, ImproperlyConfigured
+from celery.result import GroupResult, allow_join_result
from celery.utils.functional import dictfilter
from celery.utils.log import get_logger
from celery.utils.time import humanize_seconds
@@ -401,12 +402,14 @@ def _unpack_chord_result(self, tup, decode,
return retval
def apply_chord(self, header_result, body, **kwargs):
- # Overrides this to avoid calling GroupResult.save
- # pylint: disable=method-hidden
- # Note that KeyValueStoreBackend.__init__ sets self.apply_chord
- # if the implements_incr attr is set. Redis backend doesn't set
- # this flag.
- pass
+ # If any of the child results of this chord are complex (i.e. group
+ # results themselves), we need to save `header_result` to ensure that
+ # the expected structure is retained when we finish the chord and pass
+ # the results onward to the body in `on_chord_part_return()`. We don't
+ # do this in all cases to retain an optimisation in the common case
+ # where a chord header is comprised of simple result objects.
+ if any(isinstance(nr, GroupResult) for nr in header_result.results):
+ header_result.save(backend=self)
@cached_property
def _chord_zset(self):
@@ -449,20 +452,38 @@ def on_chord_part_return(self, request, state, result,
callback = maybe_signature(request.chord, app=app)
total = callback['chord_size'] + totaldiff
if readycount == total:
- decode, unpack = self.decode, self._unpack_chord_result
- with client.pipeline() as pipe:
- if self._chord_zset:
- pipeline = pipe.zrange(jkey, 0, -1)
- else:
- pipeline = pipe.lrange(jkey, 0, total)
- resl, = pipeline.execute()
- try:
- callback.delay([unpack(tup, decode) for tup in resl])
+ header_result = GroupResult.restore(gid)
+ if header_result is not None:
+ # If we manage to restore a `GroupResult`, then it must
+ # have been complex and saved by `apply_chord()` earlier.
+ #
+ # Before we can join the `GroupResult`, it needs to be
+ # manually marked as ready to avoid blocking
+ header_result.on_ready()
+ # We'll `join()` it to get the results and ensure they are
+ # structured as intended rather than the flattened version
+ # we'd construct without any other information.
+ join_func = (
+ header_result.join_native
+ if header_result.supports_native_join
+ else header_result.join
+ )
+ with allow_join_result():
+ resl = join_func(timeout=3.0, propagate=True)
+ else:
+ # Otherwise simply extract and decode the results we
+ # stashed along the way, which should be faster for large
+ # numbers of simple results in the chord header.
+ decode, unpack = self.decode, self._unpack_chord_result
with client.pipeline() as pipe:
- _, _ = pipe \
- .delete(jkey) \
- .delete(tkey) \
- .execute()
+ if self._chord_zset:
+ pipeline = pipe.zrange(jkey, 0, -1)
+ else:
+ pipeline = pipe.lrange(jkey, 0, total)
+ resl, = pipeline.execute()
+ resl = [unpack(tup, decode) for tup in resl]
+ try:
+ callback.delay(resl)
except Exception as exc: # pylint: disable=broad-except
logger.exception(
'Chord callback for %r raised: %r', request.group, exc)
@@ -470,6 +491,12 @@ def on_chord_part_return(self, request, state, result,
callback,
ChordError(f'Callback error: {exc!r}'),
)
+ finally:
+ with client.pipeline() as pipe:
+ _, _ = pipe \
+ .delete(jkey) \
+ .delete(tkey) \
+ .execute()
except ChordError as exc:
logger.exception('Chord %r raised: %r', request.group, exc)
return self.chord_error_from_stack(callback, exc)
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index 690acef352c..256ecdbd9ee 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -1,3 +1,4 @@
+import re
from datetime import datetime, timedelta
from time import sleep
@@ -892,9 +893,15 @@ def test_chord_on_error(self, manager):
# So for clarity of our test, we instead do it here.
# Use the error callback's result to find the failed task.
- error_callback_result = AsyncResult(
- res.children[0].children[0].result[0])
- failed_task_id = error_callback_result.result.args[0].split()[3]
+ uuid_patt = re.compile(
+ r"[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}"
+ )
+ callback_chord_exc = AsyncResult(
+ res.children[0].children[0].result[0]
+ ).result
+ failed_task_id = uuid_patt.search(str(callback_chord_exc))
+ assert (failed_task_id is not None), "No task ID in %r" % callback_chord_exc
+ failed_task_id = failed_task_id.group()
# Use new group_id result metadata to get group ID.
failed_task_result = AsyncResult(failed_task_id)
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
index 2029edc3c29..f534077a4fd 100644
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -1,3 +1,4 @@
+import itertools
import json
import random
import ssl
@@ -274,7 +275,7 @@ def test_drain_events_connection_error(self, parent_on_state_change, cancel_for)
assert consumer._pubsub._subscribed_to == {b'celery-task-meta-initial'}
-class test_RedisBackend:
+class basetest_RedisBackend:
def get_backend(self):
from celery.backends.redis import RedisBackend
@@ -287,11 +288,42 @@ def get_E_LOST(self):
from celery.backends.redis import E_LOST
return E_LOST
+ def create_task(self, i, group_id="group_id"):
+ tid = uuid()
+ task = Mock(name=f'task-{tid}')
+ task.name = 'foobarbaz'
+ self.app.tasks['foobarbaz'] = task
+ task.request.chord = signature(task)
+ task.request.id = tid
+ task.request.chord['chord_size'] = 10
+ task.request.group = group_id
+ task.request.group_index = i
+ return task
+
+ @contextmanager
+ def chord_context(self, size=1):
+ with patch('celery.backends.redis.maybe_signature') as ms:
+ request = Mock(name='request')
+ request.id = 'id1'
+ request.group = 'gid1'
+ request.group_index = None
+ tasks = [
+ self.create_task(i, group_id=request.group)
+ for i in range(size)
+ ]
+ callback = ms.return_value = Signature('add')
+ callback.id = 'id1'
+ callback['chord_size'] = size
+ callback.delay = Mock(name='callback.delay')
+ yield tasks, request, callback
+
def setup(self):
self.Backend = self.get_backend()
self.E_LOST = self.get_E_LOST()
self.b = self.Backend(app=self.app)
+
+class test_RedisBackend(basetest_RedisBackend):
@pytest.mark.usefixtures('depends_on_current_app')
def test_reduce(self):
pytest.importorskip('redis')
@@ -623,20 +655,36 @@ def test_set_no_expire(self):
self.b.expires = None
self.b._set_with_state('foo', 'bar', states.SUCCESS)
- def create_task(self, i):
+ def test_process_cleanup(self):
+ self.b.process_cleanup()
+
+ def test_get_set_forget(self):
tid = uuid()
- task = Mock(name=f'task-{tid}')
- task.name = 'foobarbaz'
- self.app.tasks['foobarbaz'] = task
- task.request.chord = signature(task)
- task.request.id = tid
- task.request.chord['chord_size'] = 10
- task.request.group = 'group_id'
- task.request.group_index = i
- return task
+ self.b.store_result(tid, 42, states.SUCCESS)
+ assert self.b.get_state(tid) == states.SUCCESS
+ assert self.b.get_result(tid) == 42
+ self.b.forget(tid)
+ assert self.b.get_state(tid) == states.PENDING
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return(self, restore):
+ def test_set_expires(self):
+ self.b = self.Backend(expires=512, app=self.app)
+ tid = uuid()
+ key = self.b.get_key_for_task(tid)
+ self.b.store_result(tid, 42, states.SUCCESS)
+ self.b.client.expire.assert_called_with(
+ key, 512,
+ )
+
+
+class test_RedisBackend_chords_simple(basetest_RedisBackend):
+ @pytest.fixture(scope="class", autouse=True)
+ def simple_header_result(self):
+ with patch(
+ "celery.result.GroupResult.restore", return_value=None,
+ ) as p:
+ yield p
+
+ def test_on_chord_part_return(self):
tasks = [self.create_task(i) for i in range(10)]
random.shuffle(tasks)
@@ -652,8 +700,7 @@ def test_on_chord_part_return(self, restore):
call(jkey, 86400), call(tkey, 86400),
])
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return__unordered(self, restore):
+ def test_on_chord_part_return__unordered(self):
self.app.conf.result_backend_transport_options = dict(
result_chord_ordered=False,
)
@@ -673,8 +720,7 @@ def test_on_chord_part_return__unordered(self, restore):
call(jkey, 86400), call(tkey, 86400),
])
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return__ordered(self, restore):
+ def test_on_chord_part_return__ordered(self):
self.app.conf.result_backend_transport_options = dict(
result_chord_ordered=True,
)
@@ -694,8 +740,7 @@ def test_on_chord_part_return__ordered(self, restore):
call(jkey, 86400), call(tkey, 86400),
])
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return_no_expiry(self, restore):
+ def test_on_chord_part_return_no_expiry(self):
old_expires = self.b.expires
self.b.expires = None
tasks = [self.create_task(i) for i in range(10)]
@@ -712,8 +757,7 @@ def test_on_chord_part_return_no_expiry(self, restore):
self.b.expires = old_expires
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return_expire_set_to_zero(self, restore):
+ def test_on_chord_part_return_expire_set_to_zero(self):
old_expires = self.b.expires
self.b.expires = 0
tasks = [self.create_task(i) for i in range(10)]
@@ -730,8 +774,7 @@ def test_on_chord_part_return_expire_set_to_zero(self, restore):
self.b.expires = old_expires
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return_no_expiry__unordered(self, restore):
+ def test_on_chord_part_return_no_expiry__unordered(self):
self.app.conf.result_backend_transport_options = dict(
result_chord_ordered=False,
)
@@ -752,8 +795,7 @@ def test_on_chord_part_return_no_expiry__unordered(self, restore):
self.b.expires = old_expires
- @patch('celery.result.GroupResult.restore')
- def test_on_chord_part_return_no_expiry__ordered(self, restore):
+ def test_on_chord_part_return_no_expiry__ordered(self):
self.app.conf.result_backend_transport_options = dict(
result_chord_ordered=True,
)
@@ -926,39 +968,80 @@ def test_on_chord_part_return__other_error__ordered(self):
callback.id, exc=ANY,
)
- @contextmanager
- def chord_context(self, size=1):
- with patch('celery.backends.redis.maybe_signature') as ms:
- tasks = [self.create_task(i) for i in range(size)]
- request = Mock(name='request')
- request.id = 'id1'
- request.group = 'gid1'
- request.group_index = None
- callback = ms.return_value = Signature('add')
- callback.id = 'id1'
- callback['chord_size'] = size
- callback.delay = Mock(name='callback.delay')
- yield tasks, request, callback
- def test_process_cleanup(self):
- self.b.process_cleanup()
+class test_RedisBackend_chords_complex(basetest_RedisBackend):
+ @pytest.fixture(scope="function", autouse=True)
+ def complex_header_result(self):
+ with patch("celery.result.GroupResult.restore") as p:
+ yield p
+
+ def test_apply_chord_complex_header(self):
+ mock_header_result = Mock()
+ # No results in the header at all - won't call `save()`
+ mock_header_result.results = tuple()
+ self.b.apply_chord(mock_header_result, None)
+ mock_header_result.save.assert_not_called()
+ mock_header_result.save.reset_mock()
+ # A single simple result in the header - won't call `save()`
+ mock_header_result.results = (self.app.AsyncResult("foo"), )
+ self.b.apply_chord(mock_header_result, None)
+ mock_header_result.save.assert_not_called()
+ mock_header_result.save.reset_mock()
+ # Many simple results in the header - won't call `save()`
+ mock_header_result.results = (self.app.AsyncResult("foo"), ) * 42
+ self.b.apply_chord(mock_header_result, None)
+ mock_header_result.save.assert_not_called()
+ mock_header_result.save.reset_mock()
+ # A single complex result in the header - will call `save()`
+ mock_header_result.results = (self.app.GroupResult("foo"), )
+ self.b.apply_chord(mock_header_result, None)
+ mock_header_result.save.assert_called_once_with(backend=self.b)
+ mock_header_result.save.reset_mock()
+ # Many complex results in the header - will call `save()`
+ mock_header_result.results = (self.app.GroupResult("foo"), ) * 42
+ self.b.apply_chord(mock_header_result, None)
+ mock_header_result.save.assert_called_once_with(backend=self.b)
+ mock_header_result.save.reset_mock()
+ # Mixed simple and complex results in the header - will call `save()`
+ mock_header_result.results = itertools.islice(
+ itertools.cycle((
+ self.app.AsyncResult("foo"), self.app.GroupResult("foo"),
+ )), 42,
+ )
+ self.b.apply_chord(mock_header_result, None)
+ mock_header_result.save.assert_called_once_with(backend=self.b)
+ mock_header_result.save.reset_mock()
- def test_get_set_forget(self):
- tid = uuid()
- self.b.store_result(tid, 42, states.SUCCESS)
- assert self.b.get_state(tid) == states.SUCCESS
- assert self.b.get_result(tid) == 42
- self.b.forget(tid)
- assert self.b.get_state(tid) == states.PENDING
+ @pytest.mark.parametrize("supports_native_join", (True, False))
+ def test_on_chord_part_return(
+ self, complex_header_result, supports_native_join,
+ ):
+ mock_result_obj = complex_header_result.return_value
+ mock_result_obj.supports_native_join = supports_native_join
- def test_set_expires(self):
- self.b = self.Backend(expires=512, app=self.app)
- tid = uuid()
- key = self.b.get_key_for_task(tid)
- self.b.store_result(tid, 42, states.SUCCESS)
- self.b.client.expire.assert_called_with(
- key, 512,
- )
+ tasks = [self.create_task(i) for i in range(10)]
+ random.shuffle(tasks)
+
+ with self.chord_context(10) as (tasks, request, callback):
+ for task, result_val in zip(tasks, itertools.cycle((42, ))):
+ self.b.on_chord_part_return(
+ task.request, states.SUCCESS, result_val,
+ )
+ # Confirm that `zadd` was called even though we won't end up
+ # using the data pushed into the sorted set
+ assert self.b.client.zadd.call_count == 1
+ self.b.client.zadd.reset_mock()
+ # Confirm that neither `zrange` nor `lrange` was called
+ self.b.client.zrange.assert_not_called()
+ self.b.client.lrange.assert_not_called()
+ # Confirm that the `GroupResult.restore` mock was called
+ complex_header_result.assert_called_once_with(request.group)
+ # Confirm that the callback was called with the `join()`ed group result
+ if supports_native_join:
+ expected_join = mock_result_obj.join_native
+ else:
+ expected_join = mock_result_obj.join
+ callback.delay.assert_called_once_with(expected_join())
class test_SentinelBackend:
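For context, the "complex header" cases exercised above correspond to canvases like the following sketch (a hedged illustration; the `add` and `tsum` tasks are placeholders defined inline, not part of the patch):

.. code-block:: python

    from celery import Celery, chord, group

    app = Celery()

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def tsum(nums):
        return sum(nums)

    # A "simple" header holds plain task signatures; the redis backend can
    # track completion with counters alone and never calls `save()`.
    simple = chord((add.s(i, i) for i in range(10)), tsum.s())

    # A "complex" header contains a nested group, so the backend must
    # `save()` the GroupResult and later `restore()` it to join results.
    complex_header = chord(group(add.s(1, 1), add.s(2, 2)), tsum.s())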
From b27ac4ab07859b987848f02a5e5ef0c0853ee9da Mon Sep 17 00:00:00 2001
From: Lewis Kabui
Date: Wed, 14 Oct 2020 12:12:24 +0300
Subject: [PATCH 078/157] Update obsolete --loglevel argument values in docs
---
docs/getting-started/first-steps-with-celery.rst | 2 +-
docs/userguide/periodic-tasks.rst | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/getting-started/first-steps-with-celery.rst b/docs/getting-started/first-steps-with-celery.rst
index f3b80c42dbe..aefaa4aa867 100644
--- a/docs/getting-started/first-steps-with-celery.rst
+++ b/docs/getting-started/first-steps-with-celery.rst
@@ -159,7 +159,7 @@ argument:
.. code-block:: console
- $ celery -A tasks worker --loglevel=info
+ $ celery -A tasks worker --loglevel=INFO
.. note::
diff --git a/docs/userguide/periodic-tasks.rst b/docs/userguide/periodic-tasks.rst
index e68bcd26c50..1e346ed2557 100644
--- a/docs/userguide/periodic-tasks.rst
+++ b/docs/userguide/periodic-tasks.rst
@@ -463,7 +463,7 @@ To install and use this extension:
.. code-block:: console
- $ celery -A proj beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler
+ $ celery -A proj beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler
Note: You may also add this as the :setting:`beat_scheduler` setting directly.
From f2825b4918d55a40e6f839f058ee353db3b90b36 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 15 Oct 2020 12:22:38 +0300
Subject: [PATCH 079/157] Set logfile, not loglevel.
---
extra/systemd/celery.service | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/extra/systemd/celery.service b/extra/systemd/celery.service
index 2510fb83cb0..ff6bacb89ed 100644
--- a/extra/systemd/celery.service
+++ b/extra/systemd/celery.service
@@ -12,7 +12,7 @@ ExecStart=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi start $CELERYD_NODES \
--pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
--loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS'
ExecStop=/bin/sh -c '${CELERY_BIN} multi stopwait $CELERYD_NODES \
- --pidfile=${CELERYD_PID_FILE} --loglevel="${CELERYD_LOG_LEVEL}"'
+ --pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE}'
ExecReload=/bin/sh -c '${CELERY_BIN} -A $CELERY_APP multi restart $CELERYD_NODES \
--pidfile=${CELERYD_PID_FILE} --logfile=${CELERYD_LOG_FILE} \
--loglevel="${CELERYD_LOG_LEVEL}" $CELERYD_OPTS'
From 05da357502a109c05b35392391299d75d181ccab Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 15 Oct 2020 12:24:45 +0300
Subject: [PATCH 080/157] Mention removed deprecated modules in the release
notes.
Fixes #6406.
---
docs/whatsnew-5.0.rst | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index af8dd18fa5d..9360a5b9588 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -250,6 +250,18 @@ AMQP Result Backend
The AMQP result backend has been removed as it was deprecated in version 4.0.
+Removed Deprecated Modules
+--------------------------
+
+The `celery.utils.encoding` and `celery.task` modules were deprecated
+in version 4.0 and have therefore been removed in 5.0.
+
+If you were using the `celery.utils.encoding` module before,
+you should import `kombu.utils.encoding` instead.
+
+If you were using the `celery.task` module before, you should import directly
+from the `celery` module instead.
+
.. _new_command_line_interface:
New Command Line Interface
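As a short illustration of the migration described above (a sketch; `safe_str` is one helper known to exist in both modules):

.. code-block:: python

    # Celery 4.x (module removed in 5.0):
    # from celery.utils.encoding import safe_str

    # Celery 5.0 onwards:
    from kombu.utils.encoding import safe_str

    # Likewise, former `celery.task` users import from `celery` directly:
    from celery import shared_task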
From d1305f3e45dca17ea0c0c025a3a77c6aa62ec71a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?David=20Pa=CC=88rsson?=
Date: Fri, 16 Oct 2020 11:00:23 +0200
Subject: [PATCH 081/157] Copy __annotations__ when creating tasks
This will allow getting type hints.
Fixes #6186.
---
celery/app/base.py | 1 +
t/unit/app/test_app.py | 10 ++++++++++
2 files changed, 11 insertions(+)
diff --git a/celery/app/base.py b/celery/app/base.py
index dc7c41d804f..3e33bb068e1 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -428,6 +428,7 @@ def _task_from_fun(self, fun, name=None, base=None, bind=False, **options):
'_decorated': True,
'__doc__': fun.__doc__,
'__module__': fun.__module__,
+ '__annotations__': fun.__annotations__,
'__header__': staticmethod(head_from_fun(fun, bound=bind)),
'__wrapped__': run}, **options))()
# for some reason __qualname__ cannot be set in type()
diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
index 9571b401254..969489fa164 100644
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -494,6 +494,16 @@ def foo():
finally:
_imports.MP_MAIN_FILE = None
+ def test_can_get_type_hints_for_tasks(self):
+ import typing
+
+ with self.Celery() as app:
+ @app.task
+ def foo(parameter: int) -> None:
+ pass
+
+ assert typing.get_type_hints(foo) == {'parameter': int, 'return': type(None)}
+
def test_annotate_decorator(self):
from celery.app.task import Task
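A minimal sketch of what copying ``__annotations__`` enables, mirroring the new unit test above (the `scale` task is an illustrative placeholder):

.. code-block:: python

    import typing

    from celery import Celery

    app = Celery()

    @app.task
    def scale(value: int, factor: float = 2.0) -> float:
        return value * factor

    # With `__annotations__` copied onto the generated task class, standard
    # introspection helpers now work on task objects as well.
    assert typing.get_type_hints(scale) == {
        'value': int, 'factor': float, 'return': float,
    }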
From b57ac624b871e31db2994610e119720a2e167a2c Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Thu, 17 Sep 2020 11:13:44 +1000
Subject: [PATCH 082/157] test: Improve chord body group index freezing test
Add more elements to the body so we can verify that the `group_index`
counts up from 0 as expected. This change adds the `pytest-subtests`
package as a test dependency so we can define partially independent
subtests within test functions.
---
requirements/test.txt | 1 +
t/unit/tasks/test_canvas.py | 36 +++++++++++++++++++++++++-----------
2 files changed, 26 insertions(+), 11 deletions(-)
diff --git a/requirements/test.txt b/requirements/test.txt
index 8d338510e71..92ed354e4c8 100644
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -1,6 +1,7 @@
case>=1.3.1
pytest~=6.0
pytest-celery
+pytest-subtests
pytest-timeout~=1.4.2
boto3>=1.9.178
moto==1.3.7
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
index f51efab9389..874339c4687 100644
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -2,6 +2,7 @@
from unittest.mock import MagicMock, Mock, call, patch, sentinel
import pytest
+import pytest_subtests # noqa: F401
from celery._state import _task_stack
from celery.canvas import (Signature, _chain, _maybe_group, chain, chord,
@@ -1005,22 +1006,35 @@ def test_repr(self):
x.kwargs['body'] = None
assert 'without body' in repr(x)
- def test_freeze_tasks_body_is_group(self):
- # Confirm that `group index` is passed from a chord to elements of its
- # body when the chord itself is encapsulated in a group
+ def test_freeze_tasks_body_is_group(self, subtests):
+ # Confirm that `group_index` values counting up from 0 are set for
+ # elements of a chord's body when the chord is encapsulated in a group
body_elem = self.add.s()
- chord_body = group([body_elem])
+ chord_body = group([body_elem] * 42)
chord_obj = chord(self.add.s(), body=chord_body)
top_group = group([chord_obj])
# We expect the body to be the signature we passed in before we freeze
- (embedded_body_elem, ) = chord_obj.body.tasks
- assert embedded_body_elem is body_elem
- assert embedded_body_elem.options == dict()
- # When we freeze the chord, its body will be clones and options set
+ with subtests.test(msg="Validate body tasks are retained"):
+ assert all(
+ embedded_body_elem is body_elem
+ for embedded_body_elem in chord_obj.body.tasks
+ )
+ # We also expect the body to have no initial options - since all of the
+ # embedded body elements are confirmed to be `body_elem` this is valid
+ assert body_elem.options == {}
+ # When we freeze the chord, its body will be cloned and options set
top_group.freeze()
- (embedded_body_elem, ) = chord_obj.body.tasks
- assert embedded_body_elem is not body_elem
- assert embedded_body_elem.options["group_index"] == 0 # 0th task
+ with subtests.test(
+ msg="Validate body group indicies count from 0 after freezing"
+ ):
+ assert all(
+ embedded_body_elem is not body_elem
+ for embedded_body_elem in chord_obj.body.tasks
+ )
+ assert all(
+ embedded_body_elem.options["group_index"] == i
+ for i, embedded_body_elem in enumerate(chord_obj.body.tasks)
+ )
def test_freeze_tasks_is_not_group(self):
x = chord([self.add.s(2, 2)], body=self.add.s(), app=self.app)
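The behaviour under test can be sketched as follows (hedged; `add` is a placeholder task and the body is shortened to three elements):

.. code-block:: python

    from celery import Celery, chord, group

    app = Celery()

    @app.task
    def add(x=0, y=0):
        return x + y

    body_elem = add.s()
    chord_obj = chord(add.s(), body=group([body_elem] * 3))
    top_group = group([chord_obj])
    top_group.freeze()
    # Freezing clones each body element and stamps it with its position so
    # chord results can be kept in order by the backend.
    assert [t.options["group_index"] for t in chord_obj.body.tasks] == [0, 1, 2]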
From 89d50f5a34e3c9a16f6fd2cace4ac0dc214493dc Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Thu, 15 Oct 2020 09:21:59 +1100
Subject: [PATCH 083/157] test: Use all() for subtask checks in canvas tests
When we expect all of the tasks in some iterable to meet a conditional,
we should make that clear by using `all(condition for ...)`.
---
t/unit/tasks/test_canvas.py | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
index 874339c4687..23c805d157a 100644
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -327,13 +327,9 @@ def test_from_dict_no_tasks(self):
def test_from_dict_full_subtasks(self):
c = chain(self.add.si(1, 2), self.add.si(3, 4), self.add.si(5, 6))
-
serialized = json.loads(json.dumps(c))
-
deserialized = chain.from_dict(serialized)
-
- for task in deserialized.tasks:
- assert isinstance(task, Signature)
+ assert all(isinstance(task, Signature) for task in deserialized.tasks)
@pytest.mark.usefixtures('depends_on_current_app')
def test_app_falls_back_to_default(self):
@@ -346,9 +342,8 @@ def test_handles_dicts(self):
)
c.freeze()
tasks, _ = c._frozen
- for task in tasks:
- assert isinstance(task, Signature)
- assert task.app is self.app
+ assert all(isinstance(task, Signature) for task in tasks)
+ assert all(task.app is self.app for task in tasks)
def test_groups_in_chain_to_chord(self):
g1 = group([self.add.s(2, 2), self.add.s(4, 4)])
From 56acb7b22be559255e3e481851a8125726cbb4a9 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Wed, 16 Sep 2020 14:54:06 +1000
Subject: [PATCH 084/157] test: Add more tests for `from_dict()` variants
Notably, this exposed the bug tracked in #6341 where groups are not
deeply deserialized by `group.from_dict()`.
---
t/integration/tasks.py | 66 ++++++++++++++++-
t/integration/test_canvas.py | 102 +++++++++++++++++++++++++
t/unit/tasks/test_canvas.py | 139 +++++++++++++++++++++++++++++++++++
3 files changed, 306 insertions(+), 1 deletion(-)
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
index 629afaf2ece..1b4bb581b0c 100644
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -1,6 +1,6 @@
from time import sleep
-from celery import Task, chain, chord, group, shared_task
+from celery import Signature, Task, chain, chord, group, shared_task
from celery.exceptions import SoftTimeLimitExceeded
from celery.utils.log import get_task_logger
@@ -244,3 +244,67 @@ def run(self):
if self.request.retries:
return self.request.retries
raise ValueError()
+
+
+# The signatures returned by these tasks wouldn't actually run because the
+# arguments wouldn't be fulfilled - we never actually delay them so it's fine
+@shared_task
+def return_nested_signature_chain_chain():
+ return chain(chain([add.s()]))
+
+
+@shared_task
+def return_nested_signature_chain_group():
+ return chain(group([add.s()]))
+
+
+@shared_task
+def return_nested_signature_chain_chord():
+ return chain(chord([add.s()], add.s()))
+
+
+@shared_task
+def return_nested_signature_group_chain():
+ return group(chain([add.s()]))
+
+
+@shared_task
+def return_nested_signature_group_group():
+ return group(group([add.s()]))
+
+
+@shared_task
+def return_nested_signature_group_chord():
+ return group(chord([add.s()], add.s()))
+
+
+@shared_task
+def return_nested_signature_chord_chain():
+ return chord(chain([add.s()]), add.s())
+
+
+@shared_task
+def return_nested_signature_chord_group():
+ return chord(group([add.s()]), add.s())
+
+
+@shared_task
+def return_nested_signature_chord_chord():
+ return chord(chord([add.s()], add.s()), add.s())
+
+
+@shared_task
+def rebuild_signature(sig_dict):
+ sig_obj = Signature.from_dict(sig_dict)
+
+ def _recurse(sig):
+ if not isinstance(sig, Signature):
+ raise TypeError("{!r} is not a signature object".format(sig))
+ # Most canvas types have a `tasks` attribute
+ if isinstance(sig, (chain, group, chord)):
+ for task in sig.tasks:
+ _recurse(task)
+ # `chord`s also have a `body` attribute
+ if isinstance(sig, chord):
+ _recurse(sig.body)
+ _recurse(sig_obj)
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index 256ecdbd9ee..2de8c0aa428 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -9,6 +9,7 @@
from celery.exceptions import TimeoutError
from celery.result import AsyncResult, GroupResult, ResultSet
+from . import tasks
from .conftest import get_active_redis_channels, get_redis_connection
from .tasks import (ExpectedException, add, add_chord_to_chord, add_replaced,
add_to_all, add_to_all_to_chord, build_chain_inside_task,
@@ -1095,3 +1096,104 @@ def test_nested_chord_group_chain_group_tail(self, manager):
)
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [[42, 42]]
+
+
+class test_signature_serialization:
+ """
+ Confirm nested signatures can be rebuilt after passing through a backend.
+
+ These tests are expected to finish and return `None` or raise an exception
+ in the error case. The exception indicates that some element of a nested
+ signature object was not properly deserialized from its dictionary
+ representation, and would explode later on if it were used as a signature.
+ """
+ def test_rebuild_nested_chain_chain(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_chain_chain.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chain_group(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_chain_group.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chain_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chain_chord.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ @pytest.mark.xfail(reason="#6341")
+ def test_rebuild_nested_group_chain(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_group_chain.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ @pytest.mark.xfail(reason="#6341")
+ def test_rebuild_nested_group_group(self, manager):
+ sig = chain(
+ tasks.return_nested_signature_group_group.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ @pytest.mark.xfail(reason="#6341")
+ def test_rebuild_nested_group_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_group_chord.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chord_chain(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chord_chain.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chord_group(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chord_group.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
+
+ def test_rebuild_nested_chord_chord(self, manager):
+ try:
+ manager.app.backend.ensure_chords_allowed()
+ except NotImplementedError as e:
+ raise pytest.skip(e.args[0])
+
+ sig = chain(
+ tasks.return_nested_signature_chord_chord.s(),
+ tasks.rebuild_signature.s()
+ )
+ sig.delay().get(timeout=TIMEOUT)
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
index 23c805d157a..32c0af1db10 100644
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -694,6 +694,32 @@ def test_from_dict(self):
x['args'] = None
assert group.from_dict(dict(x))
+ @pytest.mark.xfail(reason="#6341")
+ def test_from_dict_deep_deserialize(self):
+ original_group = group([self.add.s(1, 2)] * 42)
+ serialized_group = json.loads(json.dumps(original_group))
+ deserialized_group = group.from_dict(serialized_group)
+ assert all(
+ isinstance(child_task, Signature)
+ for child_task in deserialized_group.tasks
+ )
+
+ @pytest.mark.xfail(reason="#6341")
+ def test_from_dict_deeper_deserialize(self):
+ inner_group = group([self.add.s(1, 2)] * 42)
+ outer_group = group([inner_group] * 42)
+ serialized_group = json.loads(json.dumps(outer_group))
+ deserialized_group = group.from_dict(serialized_group)
+ assert all(
+ isinstance(child_task, Signature)
+ for child_task in deserialized_group.tasks
+ )
+ assert all(
+ isinstance(grandchild_task, Signature)
+ for child_task in deserialized_group.tasks
+ for grandchild_task in child_task.tasks
+ )
+
def test_call_empty_group(self):
x = group(app=self.app)
assert not len(x())
@@ -1059,6 +1085,119 @@ def chord_add():
_state.task_join_will_block = fixture_task_join_will_block
result.task_join_will_block = fixture_task_join_will_block
+ def test_from_dict(self):
+ header = self.add.s(1, 2)
+ original_chord = chord(header=header)
+ rebuilt_chord = chord.from_dict(dict(original_chord))
+ assert isinstance(rebuilt_chord, chord)
+
+ def test_from_dict_with_body(self):
+ header = body = self.add.s(1, 2)
+ original_chord = chord(header=header, body=body)
+ rebuilt_chord = chord.from_dict(dict(original_chord))
+ assert isinstance(rebuilt_chord, chord)
+
+ def test_from_dict_deep_deserialize(self, subtests):
+ header = body = self.add.s(1, 2)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ with subtests.test(msg="Validate chord header tasks is deserialized"):
+ assert all(
+ isinstance(child_task, Signature)
+ for child_task in deserialized_chord.tasks
+ )
+ with subtests.test(msg="Verify chord body is deserialized"):
+ assert isinstance(deserialized_chord.body, Signature)
+
+ @pytest.mark.xfail(reason="#6341")
+ def test_from_dict_deep_deserialize_group(self, subtests):
+ header = body = group([self.add.s(1, 2)] * 42)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ # A header which is a group gets unpacked into the chord's `tasks`
+ with subtests.test(
+ msg="Validate chord header tasks are deserialized and unpacked"
+ ):
+ assert all(
+ isinstance(child_task, Signature)
+ and not isinstance(child_task, group)
+ for child_task in deserialized_chord.tasks
+ )
+ # A body which is a group remains as we passed it in
+ with subtests.test(
+ msg="Validate chord body is deserialized and not unpacked"
+ ):
+ assert isinstance(deserialized_chord.body, group)
+ assert all(
+ isinstance(body_child_task, Signature)
+ for body_child_task in deserialized_chord.body.tasks
+ )
+
+ @pytest.mark.xfail(reason="#6341")
+ def test_from_dict_deeper_deserialize_group(self, subtests):
+ inner_group = group([self.add.s(1, 2)] * 42)
+ header = body = group([inner_group] * 42)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ # A header which is a group gets unpacked into the chord's `tasks`
+ with subtests.test(
+ msg="Validate chord header tasks are deserialized and unpacked"
+ ):
+ assert all(
+ isinstance(child_task, group)
+ for child_task in deserialized_chord.tasks
+ )
+ assert all(
+ isinstance(grandchild_task, Signature)
+ for child_task in deserialized_chord.tasks
+ for grandchild_task in child_task.tasks
+ )
+ # A body which is a group remains as we passed it in
+ with subtests.test(
+ msg="Validate chord body is deserialized and not unpacked"
+ ):
+ assert isinstance(deserialized_chord.body, group)
+ assert all(
+ isinstance(body_child_task, group)
+ for body_child_task in deserialized_chord.body.tasks
+ )
+ assert all(
+ isinstance(body_grandchild_task, Signature)
+ for body_child_task in deserialized_chord.body.tasks
+ for body_grandchild_task in body_child_task.tasks
+ )
+
+ def test_from_dict_deep_deserialize_chain(self, subtests):
+ header = body = chain([self.add.s(1, 2)] * 42)
+ original_chord = chord(header=header, body=body)
+ serialized_chord = json.loads(json.dumps(original_chord))
+ deserialized_chord = chord.from_dict(serialized_chord)
+ with subtests.test(msg="Verify chord is deserialized"):
+ assert isinstance(deserialized_chord, chord)
+ # A header which is a chain gets unpacked into the chord's `tasks`
+ with subtests.test(
+ msg="Validate chord header tasks are deserialized and unpacked"
+ ):
+ assert all(
+ isinstance(child_task, Signature)
+ and not isinstance(child_task, chain)
+ for child_task in deserialized_chord.tasks
+ )
+ # A body which is a chain gets mutated into the hidden `_chain` class
+ with subtests.test(
+ msg="Validate chord body is deserialized and not unpacked"
+ ):
+ assert isinstance(deserialized_chord.body, _chain)
+
class test_maybe_signature(CanvasCase):
From 6957f960a9f398995e17c28b77e0d402137d8455 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Tue, 8 Sep 2020 13:26:38 +1000
Subject: [PATCH 085/157] fix: Ensure group tasks are deeply deserialised
Fixes #6341
---
celery/canvas.py | 10 +++++++++-
t/integration/test_canvas.py | 3 ---
t/unit/tasks/test_canvas.py | 4 ----
3 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/celery/canvas.py b/celery/canvas.py
index 2150d0e872d..0279965d2ee 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1047,8 +1047,16 @@ class group(Signature):
@classmethod
def from_dict(cls, d, app=None):
+ # We need to mutate the `kwargs` element in place to avoid confusing
+ # `freeze()` implementations which end up here and expect to be able to
+ # access elements from that dictionary later and refer to objects
+ # canonicalized here
+ orig_tasks = d["kwargs"]["tasks"]
+ d["kwargs"]["tasks"] = rebuilt_tasks = type(orig_tasks)((
+ maybe_signature(task, app=app) for task in orig_tasks
+ ))
return _upgrade(
- d, group(d['kwargs']['tasks'], app=app, **d['options']),
+ d, group(rebuilt_tasks, app=app, **d['options']),
)
def __init__(self, *tasks, **options):
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index 2de8c0aa428..a07da12d95d 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -1133,7 +1133,6 @@ def test_rebuild_nested_chain_chord(self, manager):
)
sig.delay().get(timeout=TIMEOUT)
- @pytest.mark.xfail(reason="#6341")
def test_rebuild_nested_group_chain(self, manager):
sig = chain(
tasks.return_nested_signature_group_chain.s(),
@@ -1141,7 +1140,6 @@ def test_rebuild_nested_group_chain(self, manager):
)
sig.delay().get(timeout=TIMEOUT)
- @pytest.mark.xfail(reason="#6341")
def test_rebuild_nested_group_group(self, manager):
sig = chain(
tasks.return_nested_signature_group_group.s(),
@@ -1149,7 +1147,6 @@ def test_rebuild_nested_group_group(self, manager):
)
sig.delay().get(timeout=TIMEOUT)
- @pytest.mark.xfail(reason="#6341")
def test_rebuild_nested_group_chord(self, manager):
try:
manager.app.backend.ensure_chords_allowed()
diff --git a/t/unit/tasks/test_canvas.py b/t/unit/tasks/test_canvas.py
index 32c0af1db10..b6bd7f94cea 100644
--- a/t/unit/tasks/test_canvas.py
+++ b/t/unit/tasks/test_canvas.py
@@ -694,7 +694,6 @@ def test_from_dict(self):
x['args'] = None
assert group.from_dict(dict(x))
- @pytest.mark.xfail(reason="#6341")
def test_from_dict_deep_deserialize(self):
original_group = group([self.add.s(1, 2)] * 42)
serialized_group = json.loads(json.dumps(original_group))
@@ -704,7 +703,6 @@ def test_from_dict_deep_deserialize(self):
for child_task in deserialized_group.tasks
)
- @pytest.mark.xfail(reason="#6341")
def test_from_dict_deeper_deserialize(self):
inner_group = group([self.add.s(1, 2)] * 42)
outer_group = group([inner_group] * 42)
@@ -1112,7 +1110,6 @@ def test_from_dict_deep_deserialize(self, subtests):
with subtests.test(msg="Verify chord body is deserialized"):
assert isinstance(deserialized_chord.body, Signature)
- @pytest.mark.xfail(reason="#6341")
def test_from_dict_deep_deserialize_group(self, subtests):
header = body = group([self.add.s(1, 2)] * 42)
original_chord = chord(header=header, body=body)
@@ -1139,7 +1136,6 @@ def test_from_dict_deep_deserialize_group(self, subtests):
for body_child_task in deserialized_chord.body.tasks
)
- @pytest.mark.xfail(reason="#6341")
def test_from_dict_deeper_deserialize_group(self, subtests):
inner_group = group([self.add.s(1, 2)] * 42)
header = body = group([inner_group] * 42)
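With the fix in place, a serialization round trip rebuilds nested signatures recursively, roughly as in this sketch (the `add` task is a placeholder):

.. code-block:: python

    import json

    from celery import Celery, Signature, group

    app = Celery()

    @app.task
    def add(x, y):
        return x + y

    original = group([group([add.s(1, 2)] * 2)] * 2)
    rebuilt = group.from_dict(json.loads(json.dumps(original)))

    # Children *and* grandchildren come back as Signature objects,
    # not as plain dicts.
    assert all(
        isinstance(grandchild, Signature)
        for child in rebuilt.tasks
        for grandchild in child.tasks
    )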
From 9b78de840d74d3e5cd6d4d7701ad64ba4a43fbe6 Mon Sep 17 00:00:00 2001
From: Lewis Kabui
Date: Sat, 17 Oct 2020 14:36:32 +0300
Subject: [PATCH 086/157] Fix `celery shell` command
---
celery/bin/shell.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/bin/shell.py b/celery/bin/shell.py
index 966773c5d11..b3b77e02fdb 100644
--- a/celery/bin/shell.py
+++ b/celery/bin/shell.py
@@ -130,7 +130,7 @@ def shell(ctx, ipython=False, bpython=False,
import_module('celery.concurrency.eventlet')
if gevent:
import_module('celery.concurrency.gevent')
- import celery.task.base
+ import celery
app = ctx.obj.app
app.loader.import_default_modules()
From e966cf1be71766c763d884fa57cf45e7444de75c Mon Sep 17 00:00:00 2001
From: Anthony Lukach
Date: Thu, 15 Oct 2020 08:53:02 -0600
Subject: [PATCH 087/157] predefined_queue_urls -> predefined_queues
---
docs/getting-started/brokers/sqs.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/getting-started/brokers/sqs.rst b/docs/getting-started/brokers/sqs.rst
index 5b108cdc048..2e41ce4ef9e 100644
--- a/docs/getting-started/brokers/sqs.rst
+++ b/docs/getting-started/brokers/sqs.rst
@@ -137,7 +137,7 @@ Predefined Queues
If you want Celery to use a set of predefined queues in AWS, and to
never attempt to list SQS queues, nor attempt to create or delete them,
-pass a map of queue names to URLs using the :setting:`predefined_queue_urls`
+pass a map of queue names to URLs using the :setting:`predefined_queues`
setting::
broker_transport_options = {
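For reference, the corrected setting is used roughly as follows (a sketch; the queue name, URL, and credentials are placeholders):

.. code-block:: python

    broker_transport_options = {
        'predefined_queues': {
            'my-queue': {
                'url': 'https://sqs.us-east-1.amazonaws.com/123456789012/my-queue',
                'access_key_id': 'AKIA...',
                'secret_access_key': '...',
            },
        },
    }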
From 387518c6b2b53f816c5a59facafe500b075f7c01 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Sun, 18 Oct 2020 17:32:10 +0300
Subject: [PATCH 088/157] Update changelog.
---
Changelog.rst | 29 +++++++++++++++++++++++++++++
1 file changed, 29 insertions(+)
diff --git a/Changelog.rst b/Changelog.rst
index a8fc6d47665..14c2b0b0b4c 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -8,6 +8,35 @@ This document contains change notes for bugfix & new features
in the 5.0.x series, please see :ref:`whatsnew-5.0` for
an overview of what's new in Celery 5.0.
+.. _version-5.0.1:
+
+5.0.1
+=====
+:release-date: 2020-10-18 1.00 P.M UTC+3:00
+:release-by: Omer Katz
+
+- Specify UTF-8 as the encoding for log files (#6357).
+- Custom headers now propagate when using the protocol 1 hybrid messages (#6374).
+- Retry creating the database schema for the database results backend
+ in case of a race condition (#6298).
+- When using the Redis results backend, awaiting a chord no longer hangs
+ when setting :setting:`result_expires` to 0 (#6373).
+- When a user tries to specify the app as an option for the subcommand,
+ a custom error message is displayed (#6363).
+- Fix the `--without-gossip`, `--without-mingle`, and `--without-heartbeat`
+ options, which now work as expected (#6365).
+- Provide a clearer error message when the application cannot be loaded.
+- Avoid printing deprecation warnings for settings when they are loaded from
+ Django settings (#6385).
+- Allow lowercase log levels for the `--loglevel` option (#6388).
+- Detaching now works as expected (#6401).
+- Restore broadcasting messages from `celery control` (#6400).
+- Pass back real result for single task chains (#6411).
+- Ensure group tasks are deeply deserialized (#6342).
+- Fix chord element counting (#6354).
+- Restore the `celery shell` command (#6421).
+
+.. _version-5.0.0:
5.0.0
=====
From b50b178f41c798f63aad77c0e4908c8a7139a753 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Sun, 18 Oct 2020 17:34:42 +0300
Subject: [PATCH 089/157] =?UTF-8?q?Bump=20version:=205.0.0=20=E2=86=92=205?=
=?UTF-8?q?.0.1?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.bumpversion.cfg | 2 +-
README.rst | 6 +++---
celery/__init__.py | 2 +-
docs/includes/introduction.txt | 2 +-
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 80aca1abc6f..ea5f7e924c0 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 5.0.0
+current_version = 5.0.1
commit = True
tag = True
parse = (?P\d+)\.(?P\d+)\.(?P\d+)(?P[a-z\d]+)?
diff --git a/README.rst b/README.rst
index 3896f32a6fa..8cb4e40671b 100644
--- a/README.rst
+++ b/README.rst
@@ -2,7 +2,7 @@
|build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge|
-:Version: 5.0.0 (singularity)
+:Version: 5.0.1 (singularity)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
@@ -57,7 +57,7 @@ in such a way that the client enqueues an URL to be requested by a worker.
What do I need?
===============
-Celery version 5.0.0 runs on,
+Celery version 5.0.1 runs on,
- Python (3.6, 3.7, 3.8)
- PyPy3.6 (7.6)
@@ -89,7 +89,7 @@ Get Started
===========
If this is the first time you're trying to use Celery, or you're
-new to Celery 5.0.0 coming from previous versions then you should read our
+new to Celery 5.0.1 coming from previous versions then you should read our
getting started tutorials:
- `First steps with Celery`_
diff --git a/celery/__init__.py b/celery/__init__.py
index 9ccaae8874d..a9f497130e7 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -17,7 +17,7 @@
SERIES = 'singularity'
-__version__ = '5.0.0'
+__version__ = '5.0.1'
__author__ = 'Ask Solem'
__contact__ = 'auvipy@gmail.com'
__homepage__ = 'http://celeryproject.org'
diff --git a/docs/includes/introduction.txt b/docs/includes/introduction.txt
index 0ba1f965b3f..188fd291478 100644
--- a/docs/includes/introduction.txt
+++ b/docs/includes/introduction.txt
@@ -1,4 +1,4 @@
-:Version: 5.0.0 (cliffs)
+:Version: 5.0.1 (cliffs)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
From 76596a1892a2c5d826b3d5ffb16623d1b645bb6b Mon Sep 17 00:00:00 2001
From: Safwan Rahman
Date: Mon, 19 Oct 2020 16:10:57 +0600
Subject: [PATCH 090/157] [Fix #6361] Fixing documentation for RabbitMQ
task_queue_ha_policy
---
docs/userguide/configuration.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index 67b3bf96846..a9d0379972f 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -2122,8 +2122,8 @@ Or you can give it a list of nodes to replicate to:
task_queue_ha_policy = ['rabbit@host1', 'rabbit@host2']
-Using a list will implicitly set ``x-ha-policy`` to 'nodes' and
-``x-ha-policy-params`` to the given list of nodes.
+Using a list will implicitly set ``ha-mode`` to 'nodes' and
+``ha-params`` to the given list of nodes.
See http://www.rabbitmq.com/ha.html for more information.
From a9b1918ac670dd27a55f20ab37c86d8bc8454f3a Mon Sep 17 00:00:00 2001
From: Stepan Henek
Date: Mon, 19 Oct 2020 10:03:34 +0200
Subject: [PATCH 091/157] Fix _autodiscover_tasks_from_fixups function
---
celery/app/base.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/app/base.py b/celery/app/base.py
index 3e33bb068e1..ab9433a8a4e 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -649,8 +649,8 @@ def _autodiscover_tasks_from_names(self, packages, related_name):
def _autodiscover_tasks_from_fixups(self, related_name):
return self._autodiscover_tasks_from_names([
pkg for fixup in self._fixups
- for pkg in fixup.autodiscover_tasks()
if hasattr(fixup, 'autodiscover_tasks')
+ for pkg in fixup.autodiscover_tasks()
], related_name=related_name)
def send_task(self, name, args=None, kwargs=None, countdown=None,
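The bug and its fix come down to clause ordering in Python comprehensions: clauses run in the order written, so the ``hasattr`` guard must come before the inner ``for`` that calls the method. A standalone sketch:

.. code-block:: python

    class Fixup:
        pass  # deliberately lacks an `autodiscover_tasks` method

    fixups = [Fixup()]

    # Broken ordering - the inner `for` calls the method before the guard:
    #   [pkg for f in fixups
    #    for pkg in f.autodiscover_tasks()
    #    if hasattr(f, 'autodiscover_tasks')]
    # raises AttributeError.

    # Fixed ordering - the guard filters first, so nothing blows up:
    pkgs = [
        pkg
        for f in fixups
        if hasattr(f, 'autodiscover_tasks')
        for pkg in f.autodiscover_tasks()
    ]
    assert pkgs == []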
From 3187044b57335f37fe18f47f230efc0fb00f4d58 Mon Sep 17 00:00:00 2001
From: Stepan Henek
Date: Mon, 19 Oct 2020 22:53:42 +0200
Subject: [PATCH 092/157] fixup! Fix _autodiscover_tasks_from_fixups function
---
t/unit/app/test_app.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
index 969489fa164..a533d0cc4d4 100644
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -218,6 +218,13 @@ def test_using_v1_reduce(self):
self.app._using_v1_reduce = True
assert loads(dumps(self.app))
+ def test_autodiscover_tasks_force_fixup_fallback(self):
+ self.app.loader.autodiscover_tasks = Mock()
+ self.app.autodiscover_tasks([], force=True)
+ self.app.loader.autodiscover_tasks.assert_called_with(
+ [], 'tasks',
+ )
+
def test_autodiscover_tasks_force(self):
self.app.loader.autodiscover_tasks = Mock()
self.app.autodiscover_tasks(['proj.A', 'proj.B'], force=True)
From 215d3c1eb4c49ef1a6e89ce9d438ade638746a68 Mon Sep 17 00:00:00 2001
From: KexZh
Date: Wed, 21 Oct 2020 13:34:59 +1300
Subject: [PATCH 093/157] Correct configuration item: CELERY_RESULT_EXPIRES
Related issue: https://github.com/celery/celery/issues/4050
https://github.com/celery/celery/issues/4050#issuecomment-524626647
---
docs/userguide/configuration.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index a9d0379972f..5331c4b9a58 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -115,7 +115,7 @@ have been moved into a new ``task_`` prefix.
``CELERY_MESSAGE_COMPRESSION`` :setting:`result_compression`
``CELERY_RESULT_EXCHANGE`` :setting:`result_exchange`
``CELERY_RESULT_EXCHANGE_TYPE`` :setting:`result_exchange_type`
-``CELERY_TASK_RESULT_EXPIRES`` :setting:`result_expires`
+``CELERY_RESULT_EXPIRES`` :setting:`result_expires`
``CELERY_RESULT_PERSISTENT`` :setting:`result_persistent`
``CELERY_RESULT_SERIALIZER`` :setting:`result_serializer`
``CELERY_RESULT_DBURI`` Use :setting:`result_backend` instead.
From a2498d37aa40614a2eecb3dddcae61754056b5c9 Mon Sep 17 00:00:00 2001
From: Thomas Riccardi
Date: Thu, 22 Oct 2020 17:47:14 +0200
Subject: [PATCH 094/157] Flush worker prints, notably the banner
In some cases (Kubernetes, root) the banner is only printed at the
end of the process execution, instead of at the beginning.
---
celery/apps/worker.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/celery/apps/worker.py b/celery/apps/worker.py
index 882751fb8a9..c220857eb3a 100644
--- a/celery/apps/worker.py
+++ b/celery/apps/worker.py
@@ -79,7 +79,7 @@ def active_thread_count():
def safe_say(msg):
- print(f'\n{msg}', file=sys.__stderr__)
+ print(f'\n{msg}', file=sys.__stderr__, flush=True)
class Worker(WorkController):
@@ -169,7 +169,7 @@ def emit_banner(self):
str(self.colored.cyan(
' \n', self.startup_info(artlines=not use_image))),
str(self.colored.reset(self.extra_info() or '')),
- ])), file=sys.__stdout__)
+ ])), file=sys.__stdout__, flush=True)
def on_consumer_ready(self, consumer):
signals.worker_ready.send(sender=consumer)
@@ -187,7 +187,7 @@ def purge_messages(self):
with self.app.connection_for_write() as connection:
count = self.app.control.purge(connection=connection)
if count: # pragma: no cover
- print(f"purge: Erased {count} {pluralize(count, 'message')} from the queue.\n")
+ print(f"purge: Erased {count} {pluralize(count, 'message')} from the queue.\n", flush=True)
def tasklist(self, include_builtins=True, sep='\n', int_='celery.'):
return sep.join(
From 8c5e9888ae10288ae1b2113bdce6a4a41c47354b Mon Sep 17 00:00:00 2001
From: Safwan Rahman
Date: Tue, 27 Oct 2020 02:34:51 +0600
Subject: [PATCH 095/157] [Fix #6361] Remove RabbitMQ ha_policy from queue
---
celery/app/amqp.py | 21 +++------------------
celery/app/defaults.py | 1 -
docs/userguide/configuration.rst | 27 ---------------------------
t/unit/app/test_amqp.py | 32 --------------------------------
4 files changed, 3 insertions(+), 78 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index 7031bc8b9b6..1a0454e9a92 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -46,7 +46,6 @@ class Queues(dict):
create_missing (bool): By default any unknown queues will be
added automatically, but if this flag is disabled the occurrence
of unknown queues in `wanted` will raise :exc:`KeyError`.
- ha_policy (Sequence, str): Default HA policy for queues with none set.
max_priority (int): Default x-max-priority for queues with none set.
"""
@@ -55,14 +54,13 @@ class Queues(dict):
_consume_from = None
def __init__(self, queues=None, default_exchange=None,
- create_missing=True, ha_policy=None, autoexchange=None,
+ create_missing=True, autoexchange=None,
max_priority=None, default_routing_key=None):
dict.__init__(self)
self.aliases = WeakValueDictionary()
self.default_exchange = default_exchange
self.default_routing_key = default_routing_key
self.create_missing = create_missing
- self.ha_policy = ha_policy
self.autoexchange = Exchange if autoexchange is None else autoexchange
self.max_priority = max_priority
if queues is not None and not isinstance(queues, Mapping):
@@ -122,10 +120,6 @@ def _add(self, queue):
queue.exchange = self.default_exchange
if not queue.routing_key:
queue.routing_key = self.default_routing_key
- if self.ha_policy:
- if queue.queue_arguments is None:
- queue.queue_arguments = {}
- self._set_ha_policy(queue.queue_arguments)
if self.max_priority is not None:
if queue.queue_arguments is None:
queue.queue_arguments = {}
@@ -133,13 +127,6 @@ def _add(self, queue):
self[queue.name] = queue
return queue
- def _set_ha_policy(self, args):
- policy = self.ha_policy
- if isinstance(policy, (list, tuple)):
- return args.update({'ha-mode': 'nodes',
- 'ha-params': list(policy)})
- args['ha-mode'] = policy
-
def _set_max_priority(self, args):
if 'x-max-priority' not in args and self.max_priority is not None:
return args.update({'x-max-priority': self.max_priority})
@@ -251,7 +238,7 @@ def create_task_message(self):
def send_task_message(self):
return self._create_task_sender()
- def Queues(self, queues, create_missing=None, ha_policy=None,
+ def Queues(self, queues, create_missing=None,
autoexchange=None, max_priority=None):
# Create new :class:`Queues` instance, using queue defaults
# from the current configuration.
@@ -259,8 +246,6 @@ def Queues(self, queues, create_missing=None, ha_policy=None,
default_routing_key = conf.task_default_routing_key
if create_missing is None:
create_missing = conf.task_create_missing_queues
- if ha_policy is None:
- ha_policy = conf.task_queue_ha_policy
if max_priority is None:
max_priority = conf.task_queue_max_priority
if not queues and conf.task_default_queue:
@@ -271,7 +256,7 @@ def Queues(self, queues, create_missing=None, ha_policy=None,
else autoexchange)
return self.queues_cls(
queues, self.default_exchange, create_missing,
- ha_policy, autoexchange, max_priority, default_routing_key,
+ autoexchange, max_priority, default_routing_key,
)
def Router(self, queues=None, create_missing=None):
diff --git a/celery/app/defaults.py b/celery/app/defaults.py
index d0fa9d20b54..9fec8472c96 100644
--- a/celery/app/defaults.py
+++ b/celery/app/defaults.py
@@ -267,7 +267,6 @@ def __repr__(self):
type='dict', old={'celery_task_publish_retry_policy'},
),
queues=Option(type='dict'),
- queue_ha_policy=Option(None, type='string'),
queue_max_priority=Option(None, type='int'),
reject_on_worker_lost=Option(type='bool'),
remote_tracebacks=Option(False, type='bool'),
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index 5331c4b9a58..e9c1c76c151 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -2100,33 +2100,6 @@ The final routing options for ``tasks.add`` will become:
See :ref:`routers` for more examples.
-.. setting:: task_queue_ha_policy
-
-``task_queue_ha_policy``
-~~~~~~~~~~~~~~~~~~~~~~~~
-:brokers: RabbitMQ
-
-Default: :const:`None`.
-
-This will set the default HA policy for a queue, and the value
-can either be a string (usually ``all``):
-
-.. code-block:: python
-
- task_queue_ha_policy = 'all'
-
-Using 'all' will replicate the queue to all current nodes,
-Or you can give it a list of nodes to replicate to:
-
-.. code-block:: python
-
- task_queue_ha_policy = ['rabbit@host1', 'rabbit@host2']
-
-Using a list will implicitly set ``ha-mode`` to 'nodes' and
-``ha-params`` to the given list of nodes.
-
-See http://www.rabbitmq.com/ha.html for more information.
-
.. setting:: task_queue_max_priority
``task_queue_max_priority``
diff --git a/t/unit/app/test_amqp.py b/t/unit/app/test_amqp.py
index ee36c08e235..bc2d26d3680 100644
--- a/t/unit/app/test_amqp.py
+++ b/t/unit/app/test_amqp.py
@@ -89,23 +89,6 @@ def test_setitem_adds_default_exchange(self):
q['foo'] = queue
assert q['foo'].exchange == q.default_exchange
- @pytest.mark.parametrize('ha_policy,qname,q,qargs,expected', [
- (None, 'xyz', 'xyz', None, None),
- (None, 'xyz', 'xyz', {'x-foo': 'bar'}, {'x-foo': 'bar'}),
- ('all', 'foo', Queue('foo'), None, {'ha-mode': 'all'}),
- ('all', 'xyx2',
- Queue('xyx2', queue_arguments={'x-foo': 'bar'}),
- None,
- {'ha-mode': 'all', 'x-foo': 'bar'}),
- (['A', 'B', 'C'], 'foo', Queue('foo'), None, {
- 'ha-mode': 'nodes',
- 'ha-params': ['A', 'B', 'C']}),
- ])
- def test_with_ha_policy(self, ha_policy, qname, q, qargs, expected):
- queues = Queues(ha_policy=ha_policy, create_missing=False)
- queues.add(q, queue_arguments=qargs)
- assert queues[qname].queue_arguments == expected
-
def test_select_add(self):
q = Queues()
q.select(['foo', 'bar'])
@@ -118,11 +101,6 @@ def test_deselect(self):
q.deselect('bar')
assert sorted(q._consume_from.keys()) == ['foo']
- def test_with_ha_policy_compat(self):
- q = Queues(ha_policy='all')
- q.add('bar')
- assert q['bar'].queue_arguments == {'ha-mode': 'all'}
-
def test_add_default_exchange(self):
ex = Exchange('fff', 'fanout')
q = Queues(default_exchange=ex)
@@ -143,12 +121,6 @@ def test_alias(self):
({'max_priority': 10},
'moo', Queue('moo', queue_arguments=None),
{'x-max-priority': 10}),
- ({'ha_policy': 'all', 'max_priority': 5},
- 'bar', 'bar',
- {'ha-mode': 'all', 'x-max-priority': 5}),
- ({'ha_policy': 'all', 'max_priority': 5},
- 'xyx2', Queue('xyx2', queue_arguments={'x-max-priority': 2}),
- {'ha-mode': 'all', 'x-max-priority': 2}),
({'max_priority': None},
'foo2', 'foo2',
None),
@@ -255,10 +227,6 @@ def test_countdown_negative(self):
with pytest.raises(ValueError):
self.app.amqp.as_task_v2(uuid(), 'foo', countdown=-1232132323123)
- def test_Queues__with_ha_policy(self):
- x = self.app.amqp.Queues({}, ha_policy='all')
- assert x.ha_policy == 'all'
-
def test_Queues__with_max_priority(self):
x = self.app.amqp.Queues({}, max_priority=23)
assert x.max_priority == 23
From 678e422092381d19d88aa928ee308d9562c545d9 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Fri, 16 Oct 2020 09:33:42 +1100
Subject: [PATCH 096/157] ci: Fix TOXENV for pypy3 unit tests
Fixes #6409
---
.travis.yml | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/.travis.yml b/.travis.yml
index 96fb6f4d872..dc7e1e3c6c5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -64,8 +64,9 @@ jobs:
- TOXENV=flake8,apicheck,configcheck,bandit
- CELERY_TOX_PARALLEL='--parallel --parallel-live'
stage: lint
+
- python: pypy3.6-7.3.1
- env: TOXENV=pypy3
+ env: TOXENV=pypy3-unit
stage: test
before_install:
From f95f56842640c8c9f4050a233cfbc051a07ee376 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Fri, 16 Oct 2020 09:38:06 +1100
Subject: [PATCH 097/157] ci: Move Python 3.9 test base from dev to release
---
.travis.yml | 6 +++---
tox.ini | 12 ++++++------
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index dc7e1e3c6c5..3c532ee95de 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -5,7 +5,7 @@ python:
- '3.6'
- '3.7'
- '3.8'
- - '3.9-dev'
+ - '3.9'
os:
- linux
stages:
@@ -25,9 +25,9 @@ env:
jobs:
fast_finish: true
allow_failures:
- - python: '3.9-dev'
+ - python: '3.9'
include:
- - python: '3.9-dev'
+ - python: '3.9'
env: MATRIX_TOXENV=integration-rabbitmq
stage: integration
diff --git a/tox.ini b/tox.ini
index 1b12965923a..8ec20b7a007 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,7 @@
[tox]
envlist =
- {3.6,3.7,3.8,3.9-dev,pypy3}-unit
- {3.6,3.7,3.8,3.9-dev,pypy3}-integration-{rabbitmq,redis,dynamodb,azureblockblob,cache,cassandra,elasticsearch}
+ {3.6,3.7,3.8,3.9,pypy3}-unit
+ {3.6,3.7,3.8,3.9,pypy3}-integration-{rabbitmq,redis,dynamodb,azureblockblob,cache,cassandra,elasticsearch}
flake8
apicheck
@@ -14,9 +14,9 @@ deps=
-r{toxinidir}/requirements/test.txt
-r{toxinidir}/requirements/pkgutils.txt
- 3.6,3.7,3.8,3.9-dev: -r{toxinidir}/requirements/test-ci-default.txt
- 3.5,3.6,3.7,3.8,3.9-dev: -r{toxinidir}/requirements/docs.txt
- 3.6,3.7,3.8,3.9-dev: -r{toxinidir}/requirements/docs.txt
+ 3.6,3.7,3.8,3.9: -r{toxinidir}/requirements/test-ci-default.txt
+ 3.5,3.6,3.7,3.8,3.9: -r{toxinidir}/requirements/docs.txt
+ 3.6,3.7,3.8,3.9: -r{toxinidir}/requirements/docs.txt
pypy3: -r{toxinidir}/requirements/test-ci-base.txt
integration: -r{toxinidir}/requirements/test-integration.txt
@@ -63,7 +63,7 @@ basepython =
3.6: python3.6
3.7: python3.7
3.8: python3.8
- 3.9-dev: python3.9
+ 3.9: python3.9
pypy3: pypy3
flake8,apicheck,linkcheck,configcheck,bandit: python3.8
flakeplus: python2.7
From 7c3da03a07882ca86b801ad78dd509a67cba60af Mon Sep 17 00:00:00 2001
From: Egor Sergeevich Poderiagin
Date: Thu, 29 Oct 2020 18:16:46 +0700
Subject: [PATCH 098/157] docs: fix celery beat settings
---
docs/userguide/configuration.rst | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index e9c1c76c151..f942188d07d 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -65,11 +65,11 @@ have been moved into a new ``task_`` prefix.
``CELERY_IMPORTS`` :setting:`imports`
``CELERY_INCLUDE`` :setting:`include`
``CELERY_TIMEZONE`` :setting:`timezone`
-``CELERYBEAT_MAX_LOOP_INTERVAL`` :setting:`beat_max_loop_interval`
-``CELERYBEAT_SCHEDULE`` :setting:`beat_schedule`
-``CELERYBEAT_SCHEDULER`` :setting:`beat_scheduler`
-``CELERYBEAT_SCHEDULE_FILENAME`` :setting:`beat_schedule_filename`
-``CELERYBEAT_SYNC_EVERY`` :setting:`beat_sync_every`
+``CELERY_BEAT_MAX_LOOP_INTERVAL`` :setting:`beat_max_loop_interval`
+``CELERY_BEAT_SCHEDULE`` :setting:`beat_schedule`
+``CELERY_BEAT_SCHEDULER`` :setting:`beat_scheduler`
+``CELERY_BEAT_SCHEDULE_FILENAME`` :setting:`beat_schedule_filename`
+``CELERY_BEAT_SYNC_EVERY`` :setting:`beat_sync_every`
``BROKER_URL`` :setting:`broker_url`
``BROKER_TRANSPORT`` :setting:`broker_transport`
``BROKER_TRANSPORT_OPTIONS`` :setting:`broker_transport_options`
From 70dc29e2c0286151ef3f5e267a5e912ff932927a Mon Sep 17 00:00:00 2001
From: "Asif Saif Uddin (Auvi)"
Date: Sat, 31 Oct 2020 11:13:22 +0600
Subject: [PATCH 099/157] move to travis-ci.com
---
README.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.rst b/README.rst
index 8cb4e40671b..25b5e2c8e2b 100644
--- a/README.rst
+++ b/README.rst
@@ -498,9 +498,9 @@ file in the top distribution directory for the full license text.
.. # vim: syntax=rst expandtab tabstop=4 shiftwidth=4 shiftround
-.. |build-status| image:: https://secure.travis-ci.org/celery/celery.png?branch=master
+.. |build-status| image:: https://api.travis-ci.com/celery/celery.png?branch=master
:alt: Build status
- :target: https://travis-ci.org/celery/celery
+ :target: https://travis-ci.com/celery/celery
.. |coverage| image:: https://codecov.io/github/celery/celery/coverage.svg?branch=master
:target: https://codecov.io/github/celery/celery?branch=master
From 0db172ef3b6b1771c763e0ec7937bdba63dacbc8 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Mon, 2 Nov 2020 00:39:45 +1100
Subject: [PATCH 100/157] fix: Ensure default fairness maps to `SCHED_FAIR`
(#6447)
Fixes #6386
---
celery/concurrency/asynpool.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
index 4d2dd1138d2..7ea3eb204c9 100644
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -84,6 +84,7 @@ def unpack_from(fmt, iobuf, unpack=unpack): # noqa
SCHED_STRATEGIES = {
None: SCHED_STRATEGY_FAIR,
+ 'default': SCHED_STRATEGY_FAIR,
'fast': SCHED_STRATEGY_FCFS,
'fcfs': SCHED_STRATEGY_FCFS,
'fair': SCHED_STRATEGY_FAIR,
From 56fc486c02b8ec635ef930490ab751e7f582cc72 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Sun, 1 Nov 2020 16:19:23 +0200
Subject: [PATCH 101/157] Preserve callbacks when replacing a task with a chain
(#6189)
* Preserve callbacks when replacing a task with a chain.
* Preserve callbacks when replacing a task with a chain.
* Added tests.
* Update celery/app/task.py
Co-authored-by: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
* Mark test as flaky.
* Fix race condition in CI.
* fix: Run linked tasks in original slot for replace
This change alters the handling of linked tasks for chains which are
used as the argument to a `.replace()` call for a task which itself has
a chain of signatures to call once it completes. We ensure that the
linked callback is not only retained but also called at the appropriate
point in the newly reconstructed chain comprised of tasks from both the
replacement chain and the tail of the encapsulating chain of the task
being replaced.
We amend some tests to validate this behaviour better and ensure that
call/errbacks behave as expected if the encapsulating chain has either
set. One test is marked with an `xfail` since errbacks of encapsulating
chains are not currently called as expected due to some ambiguity in
when an errback of a replaced task should be dropped or not (#6441).
Co-authored-by: Asif Saif Uddin
Co-authored-by: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
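A hedged sketch of the user-visible behaviour (all task names here are illustrative placeholders, not part of the patch):

.. code-block:: python

    from celery import chain, shared_task

    @shared_task
    def step_one(x):
        return x + 1

    @shared_task
    def step_two(x):
        return x * 2

    @shared_task
    def on_done(result):
        print(result)

    @shared_task(bind=True)
    def replace_me(self, x):
        # The replacement chain is frozen with this task's ID and inherits
        # any `link` signatures, so `on_done` still fires with the final
        # result of the substituted chain.
        return self.replace(chain(step_one.s(x), step_two.s()))

    sig = replace_me.s(20)
    sig.link(on_done.s())  # preserved across the replace() above
    # sig.delay() -> on_done eventually receives 42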
---
celery/app/task.py | 11 +++
t/integration/tasks.py | 30 +++++++-
t/integration/test_canvas.py | 134 ++++++++++++++++++++++++++++++++++-
3 files changed, 171 insertions(+), 4 deletions(-)
diff --git a/celery/app/task.py b/celery/app/task.py
index 86c4e727d49..f8ffaefaffd 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -882,6 +882,17 @@ def replace(self, sig):
)
if self.request.chain:
+ # We need to freeze the new signature with the current task's ID to
+ # ensure that we don't disassociate the new chain from the existing
+ # task IDs which would break previously constructed results
+ # objects.
+ sig.freeze(self.request.id)
+ if "link" in sig.options:
+ final_task_links = sig.tasks[-1].options.setdefault("link", [])
+ final_task_links.extend(maybe_list(sig.options["link"]))
+ # Construct the new remainder of the task by chaining the signature
+ # we're being replaced by with signatures constructed from the
+ # chain elements in the current request.
for t in reversed(self.request.chain):
sig |= signature(t, app=self.app)
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
index 1b4bb581b0c..8aa13bc1797 100644
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -22,7 +22,7 @@ def add(x, y):
@shared_task
-def raise_error():
+def raise_error(*args):
"""Deliberately raise an error."""
raise ValueError("deliberate error")
@@ -76,6 +76,30 @@ def add_replaced(self, x, y):
raise self.replace(add.s(x, y))
+@shared_task(bind=True)
+def replace_with_chain(self, *args, link_msg=None):
+ c = chain(identity.s(*args), identity.s())
+ link_sig = redis_echo.s()
+ if link_msg is not None:
+ link_sig.args = (link_msg,)
+ link_sig.set(immutable=True)
+ c.link(link_sig)
+
+ return self.replace(c)
+
+
+@shared_task(bind=True)
+def replace_with_chain_which_raises(self, *args, link_msg=None):
+ c = chain(identity.s(*args), raise_error.s())
+ link_sig = redis_echo.s()
+ if link_msg is not None:
+ link_sig.args = (link_msg,)
+ link_sig.set(immutable=True)
+ c.link_error(link_sig)
+
+ return self.replace(c)
+
+
@shared_task(bind=True)
def add_to_all(self, nums, val):
"""Add the given value to all supplied numbers."""
@@ -143,7 +167,8 @@ def retry_once(self, *args, expires=60.0, max_retries=1, countdown=0.1):
@shared_task(bind=True, expires=60.0, max_retries=1)
-def retry_once_priority(self, *args, expires=60.0, max_retries=1, countdown=0.1):
+def retry_once_priority(self, *args, expires=60.0, max_retries=1,
+ countdown=0.1):
"""Task that fails and is retried. Returns the priority."""
if self.request.retries:
return self.request.delivery_info['priority']
@@ -160,7 +185,6 @@ def redis_echo(message):
@shared_task(bind=True)
def second_order_replace1(self, state=False):
-
redis_connection = get_redis_connection()
if not state:
redis_connection.rpush('redis-echo', 'In A')
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index a07da12d95d..4ae027fb10a 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -17,7 +17,7 @@
delayed_sum_with_soft_guard, fail, identity, ids,
print_unicode, raise_error, redis_echo, retry_once,
return_exception, return_priority, second_order_replace1,
- tsum)
+ tsum, replace_with_chain, replace_with_chain_which_raises)
RETRYABLE_EXCEPTIONS = (OSError, ConnectionError, TimeoutError)
@@ -414,6 +414,7 @@ def test_chain_of_a_chord_and_three_tasks_and_a_group(self, manager):
res = c()
assert res.get(timeout=TIMEOUT) == [8, 8]
+ @flaky
def test_nested_chain_group_lone(self, manager):
"""
Test that a lone group in a chain completes.
@@ -452,6 +453,137 @@ def test_nested_chain_group_last(self, manager):
res = sig.delay()
assert res.get(timeout=TIMEOUT) == [42, 42]
+ def test_chain_replaced_with_a_chain_and_a_callback(self, manager):
+ if not manager.app.conf.result_backend.startswith('redis'):
+ raise pytest.skip('Requires redis result backend.')
+
+ redis_connection = get_redis_connection()
+ redis_connection.delete('redis-echo')
+
+ link_msg = 'Internal chain callback'
+ c = chain(
+ identity.s('Hello '),
+ # The replacement chain will pass its args through
+ replace_with_chain.s(link_msg=link_msg),
+ add.s('world'),
+ )
+ res = c.delay()
+
+ assert res.get(timeout=TIMEOUT) == 'Hello world'
+
+ expected_msgs = {link_msg, }
+ while expected_msgs:
+ maybe_key_msg = redis_connection.blpop('redis-echo', TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError('redis-echo')
+ _, msg = maybe_key_msg
+ msg = msg.decode()
+ expected_msgs.remove(msg) # KeyError if `msg` is not in here
+
+ # There should be no more elements - block momentarily
+ assert redis_connection.blpop('redis-echo', min(1, TIMEOUT)) is None
+ redis_connection.delete('redis-echo')
+
+ def test_chain_replaced_with_a_chain_and_an_error_callback(self, manager):
+ if not manager.app.conf.result_backend.startswith('redis'):
+ raise pytest.skip('Requires redis result backend.')
+
+ redis_connection = get_redis_connection()
+ redis_connection.delete('redis-echo')
+
+ link_msg = 'Internal chain errback'
+ c = chain(
+ identity.s('Hello '),
+ replace_with_chain_which_raises.s(link_msg=link_msg),
+ add.s(' will never be seen :(')
+ )
+ res = c.delay()
+
+ with pytest.raises(ValueError):
+ res.get(timeout=TIMEOUT)
+
+ expected_msgs = {link_msg, }
+ while expected_msgs:
+ maybe_key_msg = redis_connection.blpop('redis-echo', TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError('redis-echo')
+ _, msg = maybe_key_msg
+ msg = msg.decode()
+ expected_msgs.remove(msg) # KeyError if `msg` is not in here
+
+ # There should be no more elements - block momentarily
+ assert redis_connection.blpop('redis-echo', min(1, TIMEOUT)) is None
+ redis_connection.delete('redis-echo')
+
+ def test_chain_with_cb_replaced_with_chain_with_cb(self, manager):
+ if not manager.app.conf.result_backend.startswith('redis'):
+ raise pytest.skip('Requires redis result backend.')
+
+ redis_connection = get_redis_connection()
+ redis_connection.delete('redis-echo')
+
+ link_msg = 'Internal chain callback'
+ c = chain(
+ identity.s('Hello '),
+ # The replacement chain will pass its args through
+ replace_with_chain.s(link_msg=link_msg),
+ add.s('world'),
+ )
+ c.link(redis_echo.s())
+ res = c.delay()
+
+ assert res.get(timeout=TIMEOUT) == 'Hello world'
+
+ expected_msgs = {link_msg, 'Hello world'}
+ while expected_msgs:
+ maybe_key_msg = redis_connection.blpop('redis-echo', TIMEOUT)
+ if maybe_key_msg is None:
+ raise TimeoutError('redis-echo')
+ _, msg = maybe_key_msg
+ msg = msg.decode()
+ expected_msgs.remove(msg) # KeyError if `msg` is not in here
+
+ # There should be no more elements - block momentarily
+ assert redis_connection.blpop('redis-echo', min(1, TIMEOUT)) is None
+ redis_connection.delete('redis-echo')
+
+ @pytest.mark.xfail(reason="#6441")
+ def test_chain_with_eb_replaced_with_chain_with_eb(self, manager):
+ if not manager.app.conf.result_backend.startswith('redis'):
+ raise pytest.skip('Requires redis result backend.')
+
+ redis_connection = get_redis_connection()
+ redis_connection.delete('redis-echo')
+
+ inner_link_msg = 'Internal chain errback'
+ outer_link_msg = 'External chain errback'
+ c = chain(
+ identity.s('Hello '),
+ # The replacement chain will pass its args through
+ replace_with_chain_which_raises.s(link_msg=inner_link_msg),
+ add.s('world'),
+ )
+ c.link_error(redis_echo.s(outer_link_msg))
+ res = c.delay()
+
+ with pytest.raises(ValueError):
+ res.get(timeout=TIMEOUT)
+
+ expected_msgs = {inner_link_msg, outer_link_msg}
+ while expected_msgs:
+ # Shorter timeout here because we expect failure
+ timeout = min(5, TIMEOUT)
+ maybe_key_msg = redis_connection.blpop('redis-echo', timeout)
+ if maybe_key_msg is None:
+ raise TimeoutError('redis-echo')
+ _, msg = maybe_key_msg
+ msg = msg.decode()
+ expected_msgs.remove(msg) # KeyError if `msg` is not in here
+
+ # There should be no more elements - block momentarily
+ assert redis_connection.blpop('redis-echo', min(1, TIMEOUT)) is None
+ redis_connection.delete('redis-echo')
+
class test_result_set:
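
For reference, a minimal sketch of the behaviour these integration tests exercise: a bound task replaces itself with a chain, and callbacks linked to the original task survive on the final task of the replacement. The task names and broker/backend URLs below are illustrative assumptions, not part of the patch:

```
from celery import Celery, chain

app = Celery('sketch', broker='amqp://', backend='rpc://')

@app.task
def double(x):
    return x * 2

@app.task(bind=True)
def replace_me(self, x):
    # Callbacks/errbacks linked to this task are carried over onto the
    # replacement chain's final task, which is what the tests verify.
    return self.replace(chain(double.s(x), double.s()))
```

With a worker running, `replace_me.delay(2)` would resolve to 8, and any callback linked to `replace_me` would fire only after the inner chain completes.
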
From f1145a2d91bd525f8e0f7a5662c9093e02fbf5a8 Mon Sep 17 00:00:00 2001
From: "Lewis M. Kabui" <13940255+lewisemm@users.noreply.github.com>
Date: Mon, 2 Nov 2020 13:56:45 +0300
Subject: [PATCH 102/157] Fix minor documentation omission (#6453)
Co-authored-by: Lewis Kabui
---
docs/userguide/monitoring.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/userguide/monitoring.rst b/docs/userguide/monitoring.rst
index 40e9991b572..725f264057f 100644
--- a/docs/userguide/monitoring.rst
+++ b/docs/userguide/monitoring.rst
@@ -33,7 +33,7 @@ To list all the commands available do:
.. code-block:: console
- $ celery help
+ $ celery --help
or to get help for a specific command do:
From 0833a270fae4738e128d56a63d0c4446ba0b1927 Mon Sep 17 00:00:00 2001
From: Ixiodor
Date: Mon, 2 Nov 2020 12:28:51 +0100
Subject: [PATCH 103/157] Fix max_retries override on self.retry (#6436)
* Fix max_retries override
* Fix max_retries override
* Fix max_retries override
* Update exceptions.py
typo
* Update autoretry.py
typo
* Update task.py
Prevent exception unpacking for tasks without autoretry_for
* Update test_tasks.py
Unit test
* Update test_tasks.py
Added a new test
* Update autoretry.py
Fix for explicit raise in tasks
* Update test_tasks.py
* Update autoretry.py
* Update task.py
* Update exceptions.py
* Update task.py
---
celery/app/autoretry.py | 11 ++++++++++-
celery/app/task.py | 2 ++
celery/exceptions.py | 1 +
t/unit/tasks/test_tasks.py | 37 +++++++++++++++++++++++++++++++++++++
4 files changed, 50 insertions(+), 1 deletion(-)
diff --git a/celery/app/autoretry.py b/celery/app/autoretry.py
index 21c90e026a2..5da8487bd5f 100644
--- a/celery/app/autoretry.py
+++ b/celery/app/autoretry.py
@@ -46,6 +46,15 @@ def run(*args, **kwargs):
retries=task.request.retries,
maximum=retry_backoff_max,
full_jitter=retry_jitter)
- raise task.retry(exc=exc, **retry_kwargs)
+ # Override max_retries
+ if hasattr(task, 'override_max_retries'):
+ retry_kwargs['max_retries'] = getattr(task,
+ 'override_max_retries',
+ task.max_retries)
+ ret = task.retry(exc=exc, **retry_kwargs)
+ # Stop propagation
+ if hasattr(task, 'override_max_retries'):
+ delattr(task, 'override_max_retries')
+ raise ret
task._orig_run, task.run = task.run, run
diff --git a/celery/app/task.py b/celery/app/task.py
index f8ffaefaffd..cab270cfa30 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -675,6 +675,8 @@ def retry(self, args=None, kwargs=None, exc=None, throw=True,
"""
request = self.request
retries = request.retries + 1
+ if max_retries is not None:
+ self.override_max_retries = max_retries
max_retries = self.max_retries if max_retries is None else max_retries
# Not in worker or emulated by (apply/always_eager),
diff --git a/celery/exceptions.py b/celery/exceptions.py
index 768cd4d22d2..ee903290f2f 100644
--- a/celery/exceptions.py
+++ b/celery/exceptions.py
@@ -293,3 +293,4 @@ def __init__(self, *args, **kwargs):
def __repr__(self):
return super().__repr__() + " state:" + self.state + " task_id:" + self.task_id
+
diff --git a/t/unit/tasks/test_tasks.py b/t/unit/tasks/test_tasks.py
index 154ee0295cb..8e1c05a5796 100644
--- a/t/unit/tasks/test_tasks.py
+++ b/t/unit/tasks/test_tasks.py
@@ -144,6 +144,27 @@ def retry_task_auto_retry_exception_with_new_args(self, ret=None, place_holder=N
self.retry_task_auto_retry_exception_with_new_args = retry_task_auto_retry_exception_with_new_args
+ @self.app.task(bind=True, max_retries=10, iterations=0, shared=False,
+ autoretry_for=(Exception,))
+ def retry_task_max_retries_override(self, **kwargs):
+ # Test for #6436
+ self.iterations += 1
+ if self.iterations == 3:
+ # Deliberately force a final failure once we've retried enough
+ self.retry(exc=MyCustomException, max_retries=0)
+ self.retry(exc=MyCustomException)
+
+ self.retry_task_max_retries_override = retry_task_max_retries_override
+
+ @self.app.task(bind=True, max_retries=0, iterations=0, shared=False,
+ autoretry_for=(Exception,))
+ def retry_task_explicit_exception(self, **kwargs):
+ # Test for #6436
+ self.iterations += 1
+ raise MyCustomException()
+
+ self.retry_task_explicit_exception = retry_task_explicit_exception
+
@self.app.task(bind=True, max_retries=3, iterations=0, shared=False)
def retry_task_raise_without_throw(self, **kwargs):
self.iterations += 1
@@ -432,6 +453,22 @@ def test_eager_retry_with_new_params(self):
def test_eager_retry_with_autoretry_for_exception(self):
assert self.retry_task_auto_retry_exception_with_new_args.si(place_holder="test").apply().get() == "test"
+ def test_retry_task_max_retries_override(self):
+ self.retry_task_max_retries_override.max_retries = 10
+ self.retry_task_max_retries_override.iterations = 0
+ result = self.retry_task_max_retries_override.apply()
+ with pytest.raises(MyCustomException):
+ result.get()
+ assert self.retry_task_max_retries_override.iterations == 3
+
+ def test_retry_task_explicit_exception(self):
+ self.retry_task_explicit_exception.max_retries = 0
+ self.retry_task_explicit_exception.iterations = 0
+ result = self.retry_task_explicit_exception.apply()
+ with pytest.raises(MyCustomException):
+ result.get()
+ assert self.retry_task_explicit_exception.iterations == 1
+
def test_retry_eager_should_return_value(self):
self.retry_task.max_retries = 3
self.retry_task.iterations = 0
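
A hedged sketch of the behaviour under test: with `autoretry_for` enabled, an explicit `self.retry(max_retries=...)` must override the decorator-level `max_retries` instead of being swallowed by the autoretry wrapper. The app name and broker URL are illustrative:

```
from celery import Celery

app = Celery('sketch', broker='amqp://')

@app.task(bind=True, max_retries=10, autoretry_for=(Exception,))
def flaky(self):
    if self.request.retries >= 3:
        # Stop retrying immediately; this must win over max_retries=10.
        self.retry(exc=RuntimeError('had enough'), max_retries=0)
    raise RuntimeError('transient failure')  # autoretried otherwise
```
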
From 8fee0bfeb91fc9483a041bfd169b534a8aa86bf6 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 2 Nov 2020 18:11:03 +0200
Subject: [PATCH 104/157] Happify linter.
---
celery/app/autoretry.py | 6 +++---
celery/exceptions.py | 1 -
t/integration/test_canvas.py | 1 +
t/unit/worker/test_request.py | 2 +-
4 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/celery/app/autoretry.py b/celery/app/autoretry.py
index 5da8487bd5f..a22b9f04717 100644
--- a/celery/app/autoretry.py
+++ b/celery/app/autoretry.py
@@ -48,9 +48,9 @@ def run(*args, **kwargs):
full_jitter=retry_jitter)
# Override max_retries
if hasattr(task, 'override_max_retries'):
- retry_kwargs['max_retries'] = getattr(task,
- 'override_max_retries',
- task.max_retries)
+ retry_kwargs['max_retries'] = getattr(task,
+ 'override_max_retries',
+ task.max_retries)
ret = task.retry(exc=exc, **retry_kwargs)
# Stop propagation
if hasattr(task, 'override_max_retries'):
diff --git a/celery/exceptions.py b/celery/exceptions.py
index ee903290f2f..768cd4d22d2 100644
--- a/celery/exceptions.py
+++ b/celery/exceptions.py
@@ -293,4 +293,3 @@ def __init__(self, *args, **kwargs):
def __repr__(self):
return super().__repr__() + " state:" + self.state + " task_id:" + self.task_id
-
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index 4ae027fb10a..34b8099674c 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -1239,6 +1239,7 @@ class test_signature_serialization:
signature object was not properly deserialized from its dictionary
representation, and would explode later on if it were used as a signature.
"""
+
def test_rebuild_nested_chain_chain(self, manager):
sig = chain(
tasks.return_nested_signature_chain_chain.s(),
diff --git a/t/unit/worker/test_request.py b/t/unit/worker/test_request.py
index 3ed7c553d15..d63ccbb1147 100644
--- a/t/unit/worker/test_request.py
+++ b/t/unit/worker/test_request.py
@@ -1205,7 +1205,7 @@ def test_execute_using_pool_with_none_timelimit_header(self):
def test_execute_using_pool__defaults_of_hybrid_to_proto2(self):
weakref_ref = Mock(name='weakref.ref')
headers = strategy.hybrid_to_proto2(Mock(headers=None), {'id': uuid(),
- 'task': self.mytask.name})[1]
+ 'task': self.mytask.name})[1]
job = self.zRequest(revoked_tasks=set(), ref=weakref_ref, **headers)
job.execute_using_pool(self.pool)
assert job._apply_result
From 6cf4f40fc58fe8585721f4df95a1c77d25106dbf Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 2 Nov 2020 18:11:26 +0200
Subject: [PATCH 105/157] Raise proper error when replacing with an empty
chain. (#6452)
Fixes #6451.
---
celery/app/task.py | 7 ++++++-
t/integration/tasks.py | 5 +++++
t/integration/test_canvas.py | 16 ++++++++++++----
3 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/celery/app/task.py b/celery/app/task.py
index cab270cfa30..2265ebb9e67 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -8,7 +8,7 @@
from celery import current_app, group, states
from celery._state import _task_stack
-from celery.canvas import signature
+from celery.canvas import _chain, signature
from celery.exceptions import (Ignore, ImproperlyConfigured,
MaxRetriesExceededError, Reject, Retry)
from celery.local import class_property
@@ -882,6 +882,11 @@ def replace(self, sig):
link=self.request.callbacks,
link_error=self.request.errbacks,
)
+ elif isinstance(sig, _chain):
+ if not sig.tasks:
+ raise ImproperlyConfigured(
+ "Cannot replace with an empty chain"
+ )
if self.request.chain:
# We need to freeze the new signature with the current task's ID to
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
index 8aa13bc1797..1aaeed32378 100644
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -100,6 +100,11 @@ def replace_with_chain_which_raises(self, *args, link_msg=None):
return self.replace(c)
+@shared_task(bind=True)
+def replace_with_empty_chain(self, *_):
+ return self.replace(chain())
+
+
@shared_task(bind=True)
def add_to_all(self, nums, val):
"""Add the given value to all supplied numbers."""
diff --git a/t/integration/test_canvas.py b/t/integration/test_canvas.py
index 34b8099674c..fe594807ee5 100644
--- a/t/integration/test_canvas.py
+++ b/t/integration/test_canvas.py
@@ -6,7 +6,7 @@
from celery import chain, chord, group, signature
from celery.backends.base import BaseKeyValueStoreBackend
-from celery.exceptions import TimeoutError
+from celery.exceptions import ImproperlyConfigured, TimeoutError
from celery.result import AsyncResult, GroupResult, ResultSet
from . import tasks
@@ -15,9 +15,10 @@
add_to_all, add_to_all_to_chord, build_chain_inside_task,
chord_error, collect_ids, delayed_sum,
delayed_sum_with_soft_guard, fail, identity, ids,
- print_unicode, raise_error, redis_echo, retry_once,
- return_exception, return_priority, second_order_replace1,
- tsum, replace_with_chain, replace_with_chain_which_raises)
+ print_unicode, raise_error, redis_echo,
+ replace_with_chain, replace_with_chain_which_raises,
+ replace_with_empty_chain, retry_once, return_exception,
+ return_priority, second_order_replace1, tsum)
RETRYABLE_EXCEPTIONS = (OSError, ConnectionError, TimeoutError)
@@ -584,6 +585,13 @@ def test_chain_with_eb_replaced_with_chain_with_eb(self, manager):
assert redis_connection.blpop('redis-echo', min(1, TIMEOUT)) is None
redis_connection.delete('redis-echo')
+ def test_replace_chain_with_empty_chain(self, manager):
+ r = chain(identity.s(1), replace_with_empty_chain.s()).delay()
+
+ with pytest.raises(ImproperlyConfigured,
+ match="Cannot replace with an empty chain"):
+ r.get(timeout=TIMEOUT)
+
class test_result_set:
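
The guard added to `Task.replace()` boils down to the following check; a minimal sketch, assuming Celery's private `_chain` type as imported in the diff above:

```
from celery import chain
from celery.canvas import _chain
from celery.exceptions import ImproperlyConfigured

def validate_replacement(sig):
    # An empty chain has nothing to execute, so replacing with it would
    # silently drop the remainder of the workflow; fail loudly instead.
    if isinstance(sig, _chain) and not sig.tasks:
        raise ImproperlyConfigured("Cannot replace with an empty chain")

validate_replacement(chain())  # raises ImproperlyConfigured
```
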
From 8bed67cbc85fb1f7ee71e2cd50cd76ec36ea521c Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 2 Nov 2020 19:51:28 +0200
Subject: [PATCH 106/157] Update changelog.
---
Changelog.rst | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/Changelog.rst b/Changelog.rst
index 14c2b0b0b4c..b65686a6708 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -8,6 +8,26 @@ This document contains change notes for bugfix & new features
in the 5.0.x series, please see :ref:`whatsnew-5.0` for
an overview of what's new in Celery 5.0.
+.. _version-5.0.2:
+
+5.0.2
+=====
+:release-date: 2020-11-02 8.00 P.M UTC+2:00
+:release-by: Omer Katz
+
+- Fix _autodiscover_tasks_from_fixups (#6424).
+- Flush worker prints, notably the banner (#6432).
+- **Breaking Change**: Remove `ha_policy` from queue definition. (#6440)
+
+ This argument has no effect since RabbitMQ 3.0.
+ Therefore, we feel comfortable dropping it in a patch release.
+
+- Python 3.9 support (#6418).
+- **Regression**: When using the prefork pool, pick the fair scheduling strategy by default (#6447).
+- Preserve callbacks when replacing a task with a chain (#6189).
+- Fix max_retries override on `self.retry()` (#6436).
+- Raise proper error when replacing with an empty chain (#6452).
+
.. _version-5.0.1:
5.0.1
From f50cf7d9944558167b85c14d73e8f790da251730 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 2 Nov 2020 19:52:00 +0200
Subject: [PATCH 107/157] Bump version: 5.0.1 → 5.0.2
---
.bumpversion.cfg | 2 +-
README.rst | 6 +++---
celery/__init__.py | 2 +-
docs/includes/introduction.txt | 2 +-
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index ea5f7e924c0..7be80a9bab6 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 5.0.1
+current_version = 5.0.2
commit = True
tag = True
parse = (?P\d+)\.(?P\d+)\.(?P\d+)(?P[a-z\d]+)?
diff --git a/README.rst b/README.rst
index 25b5e2c8e2b..529669641d9 100644
--- a/README.rst
+++ b/README.rst
@@ -2,7 +2,7 @@
|build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge|
-:Version: 5.0.1 (singularity)
+:Version: 5.0.2 (singularity)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
@@ -57,7 +57,7 @@ in such a way that the client enqueues an URL to be requested by a worker.
What do I need?
===============
-Celery version 5.0.1 runs on,
+Celery version 5.0.2 runs on,
- Python (3.6, 3.7, 3.8)
- PyPy3.6 (7.6)
@@ -89,7 +89,7 @@ Get Started
===========
If this is the first time you're trying to use Celery, or you're
-new to Celery 5.0.1 coming from previous versions then you should read our
+new to Celery 5.0.2 coming from previous versions then you should read our
getting started tutorials:
- `First steps with Celery`_
diff --git a/celery/__init__.py b/celery/__init__.py
index a9f497130e7..7ed8e28cb0a 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -17,7 +17,7 @@
SERIES = 'singularity'
-__version__ = '5.0.1'
+__version__ = '5.0.2'
__author__ = 'Ask Solem'
__contact__ = 'auvipy@gmail.com'
__homepage__ = 'http://celeryproject.org'
diff --git a/docs/includes/introduction.txt b/docs/includes/introduction.txt
index 188fd291478..a19bd2a012a 100644
--- a/docs/includes/introduction.txt
+++ b/docs/includes/introduction.txt
@@ -1,4 +1,4 @@
-:Version: 5.0.1 (cliffs)
+:Version: 5.0.2 (cliffs)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
From 7434545f55a2de4b6c636ca69fac1ede455bc449 Mon Sep 17 00:00:00 2001
From: Maarten Fonville
Date: Mon, 2 Nov 2020 21:44:55 +0100
Subject: [PATCH 108/157] Update daemonizing.rst
Improve the systemd documentation: describe how to auto-start the services at boot, mention that the worker can depend on the RabbitMQ service, and add Restart=always to the Celery Beat example.
---
docs/userguide/daemonizing.rst | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/docs/userguide/daemonizing.rst b/docs/userguide/daemonizing.rst
index ae804f6c32e..8b74f73bfb4 100644
--- a/docs/userguide/daemonizing.rst
+++ b/docs/userguide/daemonizing.rst
@@ -415,6 +415,12 @@ This is an example systemd file:
Once you've put that file in :file:`/etc/systemd/system`, you should run
:command:`systemctl daemon-reload` in order that Systemd acknowledges that file.
You should also run that command each time you modify it.
+Use :command:`systemctl enable celery.service` if you want the celery service to
+automatically start when (re)booting the system.
+
+Optionally you can specify extra dependencies for the celery service: e.g. if you use
+RabbitMQ as a broker, you could specify ``rabbitmq-server.service`` in both ``After=`` and ``Requires=``
+in the ``[Unit]`` `systemd section `_.
To configure user, group, :command:`chdir` change settings:
``User``, ``Group``, and ``WorkingDirectory`` defined in
@@ -496,10 +502,16 @@ This is an example systemd file for Celery Beat:
ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
--pidfile=${CELERYBEAT_PID_FILE} \
--logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
+ Restart=always
[Install]
WantedBy=multi-user.target
+Once you've put that file in :file:`/etc/systemd/system`, you should run
+:command:`systemctl daemon-reload` in order that Systemd acknowledges that file.
+You should also run that command each time you modify it.
+Use :command:`systemctl enable celerybeat.service` if you want the celery beat
+service to automatically start when (re)booting the system.
Running the worker with superuser privileges (root)
======================================================================
From d2a9b74b2122b2af0c5f219bc5800928870bd532 Mon Sep 17 00:00:00 2001
From: Maarten Fonville
Date: Mon, 2 Nov 2020 22:01:03 +0100
Subject: [PATCH 109/157] Update celerybeat.service
---
extra/systemd/celerybeat.service | 1 +
1 file changed, 1 insertion(+)
diff --git a/extra/systemd/celerybeat.service b/extra/systemd/celerybeat.service
index 8cb2ad3687e..c1b2034dcdd 100644
--- a/extra/systemd/celerybeat.service
+++ b/extra/systemd/celerybeat.service
@@ -11,6 +11,7 @@ WorkingDirectory=/opt/celery
ExecStart=/bin/sh -c '${CELERY_BIN} -A ${CELERY_APP} beat \
--pidfile=${CELERYBEAT_PID_FILE} \
--logfile=${CELERYBEAT_LOG_FILE} --loglevel=${CELERYD_LOG_LEVEL}'
+Restart=always
[Install]
WantedBy=multi-user.target
From 25eb27b210dce6a7771671b77fe6a385ce7d7eaa Mon Sep 17 00:00:00 2001
From: Mathieu Rollet
Date: Tue, 3 Nov 2020 10:58:48 +0100
Subject: [PATCH 110/157] Fix old celery beat variables
The change made 5 days ago in 7c3da03a07882ca86b801ad78dd509a67cba60af is faulty: the correct celery beat variables start with `CELERYBEAT`, not `CELERY_BEAT`.
---
docs/userguide/configuration.rst | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index f942188d07d..0e3b8376fa0 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -65,11 +65,11 @@ have been moved into a new ``task_`` prefix.
``CELERY_IMPORTS`` :setting:`imports`
``CELERY_INCLUDE`` :setting:`include`
``CELERY_TIMEZONE`` :setting:`timezone`
-``CELERY_BEAT_MAX_LOOP_INTERVAL`` :setting:`beat_max_loop_interval`
-``CELERY_BEAT_SCHEDULE`` :setting:`beat_schedule`
-``CELERY_BEAT_SCHEDULER`` :setting:`beat_scheduler`
-``CELERY_BEAT_SCHEDULE_FILENAME`` :setting:`beat_schedule_filename`
-``CELERY_BEAT_SYNC_EVERY`` :setting:`beat_sync_every`
+``CELERYBEAT_MAX_LOOP_INTERVAL`` :setting:`beat_max_loop_interval`
+``CELERYBEAT_SCHEDULE`` :setting:`beat_schedule`
+``CELERYBEAT_SCHEDULER`` :setting:`beat_scheduler`
+``CELERYBEAT_SCHEDULE_FILENAME`` :setting:`beat_schedule_filename`
+``CELERYBEAT_SYNC_EVERY`` :setting:`beat_sync_every`
``BROKER_URL`` :setting:`broker_url`
``BROKER_TRANSPORT`` :setting:`broker_transport`
``BROKER_TRANSPORT_OPTIONS`` :setting:`broker_transport_options`
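
For illustration, the corrected mapping means an old-style (pre-4.0, upper-case) configuration module spells the beat settings without an underscore after ``CELERY``; a sketch with arbitrary values:

```
# Old-style (upper-case) configuration module -- note CELERYBEAT_*:
CELERYBEAT_MAX_LOOP_INTERVAL = 300
CELERYBEAT_SYNC_EVERY = 1

# New-style lower-case equivalents on the app object:
# app.conf.beat_max_loop_interval = 300
# app.conf.beat_sync_every = 1
```
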
From 762d2e6e12d56e61274d2ef3d279864e99520dcd Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Tue, 3 Nov 2020 14:03:11 +0200
Subject: [PATCH 111/157] Fix formatting.
---
docs/whatsnew-5.0.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 9360a5b9588..3f93ce3e979 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -291,7 +291,7 @@ Starting from Celery 5.0, the pytest plugin is no longer enabled by default.
Please refer to the :ref:`documentation ` for instructions.
Ordered Group Results for the Redis Result Backend
--------------------------------------------------
+--------------------------------------------------
Previously group results were not ordered by their invocation order.
Celery 4.4.7 introduced an opt-in feature to make them ordered.
From db35ca1ccaebff02e1fe36912963803d277b4979 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Tue, 3 Nov 2020 14:10:30 +0200
Subject: [PATCH 112/157] Fix formatting.
---
docs/userguide/configuration.rst | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index 0e3b8376fa0..e9c1c76c151 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -65,11 +65,11 @@ have been moved into a new ``task_`` prefix.
``CELERY_IMPORTS`` :setting:`imports`
``CELERY_INCLUDE`` :setting:`include`
``CELERY_TIMEZONE`` :setting:`timezone`
-``CELERYBEAT_MAX_LOOP_INTERVAL`` :setting:`beat_max_loop_interval`
-``CELERYBEAT_SCHEDULE`` :setting:`beat_schedule`
-``CELERYBEAT_SCHEDULER`` :setting:`beat_scheduler`
-``CELERYBEAT_SCHEDULE_FILENAME`` :setting:`beat_schedule_filename`
-``CELERYBEAT_SYNC_EVERY`` :setting:`beat_sync_every`
+``CELERYBEAT_MAX_LOOP_INTERVAL`` :setting:`beat_max_loop_interval`
+``CELERYBEAT_SCHEDULE`` :setting:`beat_schedule`
+``CELERYBEAT_SCHEDULER`` :setting:`beat_scheduler`
+``CELERYBEAT_SCHEDULE_FILENAME`` :setting:`beat_schedule_filename`
+``CELERYBEAT_SYNC_EVERY`` :setting:`beat_sync_every`
``BROKER_URL`` :setting:`broker_url`
``BROKER_TRANSPORT`` :setting:`broker_transport`
``BROKER_TRANSPORT_OPTIONS`` :setting:`broker_transport_options`
From 42361bdd2cb858d24a896d447448b2a6bb47307d Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Wed, 4 Nov 2020 00:31:28 +1100
Subject: [PATCH 113/157] fix: Make `--workdir` eager for early handling
(#6457)
This change makes the `--workdir` option an eager one which `click`
will process early for us, before any of the others. At the same time,
we add a callback which ensures that the `chdir()` is run during
handling of the argument so that all subsequent actions (e.g. app
loading) occur in the specified working directory.
Fixes #6445
---
celery/bin/celery.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
index 5488d17c40e..6626c21fa64 100644
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -1,5 +1,6 @@
"""Celery Command Line Interface."""
import os
+import pathlib
import traceback
import click
@@ -94,6 +95,9 @@ def convert(self, value, param, ctx):
help_group="Global Options")
@click.option('--workdir',
cls=CeleryOption,
+ type=pathlib.Path,
+ callback=lambda _, __, wd: os.chdir(wd) if wd else None,
+ is_eager=True,
help_group="Global Options")
@click.option('-C',
'--no-color',
@@ -121,8 +125,6 @@ def celery(ctx, app, broker, result_backend, loader, config, workdir,
click.echo(ctx.get_help())
ctx.exit()
- if workdir:
- os.chdir(workdir)
if loader:
# Default app takes loader from this env (Issue #1066).
os.environ['CELERY_LOADER'] = loader
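
A self-contained sketch of the `click` pattern used here, outside Celery: an eager option whose callback runs before the remaining options are processed. The option names are illustrative:

```
import os
import pathlib

import click

@click.command()
@click.option('--workdir', type=pathlib.Path, is_eager=True,
              callback=lambda ctx, param, wd: os.chdir(wd) if wd else None)
@click.option('--config', default='celeryconfig')
def main(workdir, config):
    # By the time --config is handled, chdir() has already happened,
    # so anything loaded relative to the CWD resolves inside workdir.
    click.echo(f'cwd={os.getcwd()} config={config}')

if __name__ == '__main__':
    main()
```
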
From 84951b1441ef242c75fe48e2100783b3081487c0 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Tue, 3 Nov 2020 16:52:41 +0200
Subject: [PATCH 114/157] Fix example.
Fixes #6459.
---
examples/next-steps/proj/celery.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/next-steps/proj/celery.py b/examples/next-steps/proj/celery.py
index f9be2a1c549..39ce69199a9 100644
--- a/examples/next-steps/proj/celery.py
+++ b/examples/next-steps/proj/celery.py
@@ -2,7 +2,7 @@
app = Celery('proj',
broker='amqp://',
- backend='amqp://',
+ backend='rpc://',
include=['proj.tasks'])
# Optional configuration, see the application user guide.
From 406f04a082949ac42ec7a4af94fed896c515aaa4 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 5 Nov 2020 08:59:10 +0200
Subject: [PATCH 115/157] When using the MongoDB backend, don't cleanup if
result_expires is 0 or None. (#6462)
Fixes #6450.
---
celery/backends/mongodb.py | 3 +++
t/unit/backends/test_mongodb.py | 6 ++++++
2 files changed, 9 insertions(+)
diff --git a/celery/backends/mongodb.py b/celery/backends/mongodb.py
index 5ae3ddf8223..76eab766b75 100644
--- a/celery/backends/mongodb.py
+++ b/celery/backends/mongodb.py
@@ -248,6 +248,9 @@ def _forget(self, task_id):
def cleanup(self):
"""Delete expired meta-data."""
+ if not self.expires:
+ return
+
self.collection.delete_many(
{'date_done': {'$lt': self.app.now() - self.expires_delta}},
)
diff --git a/t/unit/backends/test_mongodb.py b/t/unit/backends/test_mongodb.py
index fb304b7e369..5a391d86d30 100644
--- a/t/unit/backends/test_mongodb.py
+++ b/t/unit/backends/test_mongodb.py
@@ -485,6 +485,12 @@ def test_cleanup(self, mock_get_database):
mock_get_database.assert_called_once_with()
mock_collection.delete_many.assert_called()
+ self.backend.collections = mock_collection = Mock()
+ self.backend.expires = None
+
+ self.backend.cleanup()
+ mock_collection.delete_many.assert_not_called()
+
def test_get_database_authfailure(self):
x = MongoBackend(app=self.app)
x._get_connection = Mock()
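
The semantics of the fix, reduced to a runnable sketch (the class below is a stand-in for illustration, not Celery's actual backend):

```
class FakeBackend:
    def __init__(self, expires):
        self.expires = expires      # seconds; 0/None means keep forever
        self.delete_calls = 0

    def cleanup(self):
        if not self.expires:        # mirrors the patched early return
            return
        self.delete_calls += 1      # stand-in for collection.delete_many(...)

b = FakeBackend(expires=None)
b.cleanup()
assert b.delete_calls == 0          # no cleanup when results never expire
```
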
From b038786209250bfd77de0732fc344fc204e7e54a Mon Sep 17 00:00:00 2001
From: Mike DePalatis
Date: Sun, 8 Nov 2020 06:53:00 -0700
Subject: [PATCH 116/157] Add missing space (#6468)
---
celery/bin/worker.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index 0472fde4c4b..db1c125a185 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -135,7 +135,7 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
type=click.Path(),
callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_state_db,
help_group="Worker Options",
- help="Path to the state database. The extension '.db' may be"
+ help="Path to the state database. The extension '.db' may be "
"appended to the filename.")
@click.option('-l',
'--loglevel',
From 366264ee00fb0ecb9c8a7cf06438cd9e05da107b Mon Sep 17 00:00:00 2001
From: partizan
Date: Sun, 8 Nov 2020 15:55:22 +0200
Subject: [PATCH 117/157] Fix passing queues into purge command (#6469)
In the current version, calling `celery --app my.celery_app purge -Q queue_name` fails with the following trace:
```
names = (queues or set(app.amqp.queues.keys())) - exclude_queues
TypeError: unsupported operand type(s) for -: 'list' and 'list'
```
This happens because the code expects a set while `queues` is actually a list. Here is the fix.
---
celery/bin/purge.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/celery/bin/purge.py b/celery/bin/purge.py
index 38245d02ff0..609a9a0f660 100644
--- a/celery/bin/purge.py
+++ b/celery/bin/purge.py
@@ -32,10 +32,10 @@ def purge(ctx, force, queues, exclude_queues):
There's no undo operation for this command.
"""
- queues = queues or set()
- exclude_queues = exclude_queues or set()
app = ctx.obj.app
- names = (queues or set(app.amqp.queues.keys())) - exclude_queues
+ queues = set(queues or app.amqp.queues.keys())
+ exclude_queues = set(exclude_queues or [])
+ names = queues - exclude_queues
qnum = len(names)
if names:
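
The failure mode and the fix are easy to reproduce in isolation; a sketch with made-up queue names:

```
queues = ['q1', 'q2', 'q3']   # click hands multiple -Q values over as a list
exclude = ['q2']

try:
    queues - exclude          # what the old code effectively did
except TypeError as exc:
    print(exc)                # unsupported operand type(s) for -: 'list' and 'list'

names = set(queues) - set(exclude)   # the patched behaviour
assert names == {'q1', 'q3'}
```
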
From 65fc5f49a58e9d0da433376c0d80eafaa01c2622 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Tue, 3 Nov 2020 15:50:45 +0200
Subject: [PATCH 118/157] Change donations sidebar to direct users to
OpenCollective.
---
docs/_templates/sidebardonations.html | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
diff --git a/docs/_templates/sidebardonations.html b/docs/_templates/sidebardonations.html
index d6e6dfaa788..9049cab2cab 100644
--- a/docs/_templates/sidebardonations.html
+++ b/docs/_templates/sidebardonations.html
@@ -2,12 +2,7 @@
allowtransparency="true" frameborder="0" scrolling="0" width="200px" height="35px">
-
Please help support this community project with a donation:
-
+
Donations
+
Please help support this community project with a donation.
+
From 2a6c7cfe3b1283961887bf1cb3f5aa6c8aa70820 Mon Sep 17 00:00:00 2001
From: Nick Pope
Date: Tue, 10 Nov 2020 13:49:04 +0000
Subject: [PATCH 119/157] Added pytest to extras.
Missed in 9a6c2923e859b6993227605610255bd632c1ae68.
---
setup.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/setup.py b/setup.py
index c5843c28321..35f2dd6b084 100644
--- a/setup.py
+++ b/setup.py
@@ -33,6 +33,7 @@
'msgpack',
'pymemcache',
'pyro',
+ 'pytest',
'redis',
's3',
'slmq',
From 28ebcce5d277839011f7782755ac8452b37d6afe Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Tue, 17 Nov 2020 09:34:46 +0200
Subject: [PATCH 120/157] Restore app.start() and app.worker_main() (#6481)
* Restore `app.start()` and `app.worker_main()`.
* Update celery/app/base.py
Co-authored-by: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
* Fix spelling error.
Co-authored-by: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
---
celery/app/base.py | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+)
diff --git a/celery/app/base.py b/celery/app/base.py
index ab9433a8a4e..ed4bd748b56 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -1,12 +1,14 @@
"""Actual App instance implementation."""
import inspect
import os
+import sys
import threading
import warnings
from collections import UserDict, defaultdict, deque
from datetime import datetime
from operator import attrgetter
+from click.exceptions import Exit
from kombu import pools
from kombu.clocks import LamportClock
from kombu.common import oid_from
@@ -342,6 +344,30 @@ def close(self):
self._pool = None
_deregister_app(self)
+ def start(self, argv=None):
+ from celery.bin.celery import celery
+
+ celery.params[0].default = self
+
+ try:
+ celery.main(args=argv, standalone_mode=False)
+ except Exit as e:
+ return e.exit_code
+ finally:
+ celery.params[0].default = None
+
+ def worker_main(self, argv=None):
+ if argv is None:
+ argv = sys.argv
+
+ if 'worker' not in argv:
+ raise ValueError(
+ "The worker sub-command must be specified in argv.\n"
+ "Use app.start() to programmatically start other commands."
+ )
+
+ self.start(argv=argv)
+
def task(self, *args, **opts):
"""Decorator to create a task class out of any callable.
From 60ba37900a038420aec0fc76e60c55989f66c718 Mon Sep 17 00:00:00 2001
From: maybe-sybr <58414429+maybe-sybr@users.noreply.github.com>
Date: Wed, 18 Nov 2020 17:45:23 +1100
Subject: [PATCH 121/157] fix: `node_format()` logfile before detaching
Fixes #6426
---
celery/bin/worker.py | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index db1c125a185..cd826b89b17 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -94,6 +94,11 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
executable=None, hostname=None):
"""Detach program by argv."""
fake = 1 if C_FAKEFORK else fake
+ # `detached()` will attempt to touch the logfile to confirm that error
+ # messages won't be lost after detaching stdout/err, but this means we need
+ # to pre-format it rather than relying on `setup_logging_subsystem()` like
+ # we can elsewhere.
+ logfile = node_format(logfile, hostname)
with detached(logfile, pidfile, uid, gid, umask, workdir, fake,
after_forkers=False):
try:
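
A hedged illustration of the pre-formatting step, assuming `node_format()` from `celery.utils.nodenames` and its usual `%n`-style expansions:

```
from celery.utils.nodenames import node_format

# Expand node-name placeholders before detaching so the daemonization
# code can touch the real path; '%n' becomes the name part of the node.
logfile = node_format('/var/log/celery/%n.log', 'worker1@example.com')
print(logfile)  # expected: /var/log/celery/worker1.log
```
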
From e2031688284484d5b5a57ba29cd9cae2d9a81e39 Mon Sep 17 00:00:00 2001
From: Matus Valo
Date: Sun, 22 Nov 2020 16:59:14 +0100
Subject: [PATCH 122/157] Multithreaded backend (#6416)
* Cache the backend in thread-local storage instead of a global variable
* Cache the oid in thread-local storage instead of a global variable
* Improve the code returning thread-local data
* Move thread-local storage to the Celery class, introduce thread_oid and add unit tests
---
celery/app/base.py | 24 +++++++++++++--
celery/backends/rpc.py | 4 +--
celery/canvas.py | 2 +-
t/unit/app/test_app.py | 59 +++++++++++++++++++++++++++++++++++++
t/unit/backends/test_rpc.py | 17 ++++++++++-
t/unit/tasks/test_chord.py | 7 ++---
t/unit/tasks/test_result.py | 47 ++++++++++++++---------------
t/unit/test_canvas.py | 33 +++++++++++++++++++++
8 files changed, 159 insertions(+), 34 deletions(-)
create mode 100644 t/unit/test_canvas.py
diff --git a/celery/app/base.py b/celery/app/base.py
index ed4bd748b56..27e5b610ca7 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -206,6 +206,8 @@ class name.
task_cls = 'celery.app.task:Task'
registry_cls = 'celery.app.registry:TaskRegistry'
+ #: Thread local storage.
+ _local = None
_fixups = None
_pool = None
_conf = None
@@ -229,6 +231,9 @@ def __init__(self, main=None, loader=None, backend=None,
changes=None, config_source=None, fixups=None, task_cls=None,
autofinalize=True, namespace=None, strict_typing=True,
**kwargs):
+
+ self._local = threading.local()
+
self.clock = LamportClock()
self.main = main
self.amqp_cls = amqp or self.amqp_cls
@@ -727,7 +732,7 @@ def send_task(self, name, args=None, kwargs=None, countdown=None,
task_id, name, args, kwargs, countdown, eta, group_id, group_index,
expires, retries, chord,
maybe_list(link), maybe_list(link_error),
- reply_to or self.oid, time_limit, soft_time_limit,
+ reply_to or self.thread_oid, time_limit, soft_time_limit,
self.conf.task_send_sent_event,
root_id, parent_id, shadow, chain,
argsrepr=options.get('argsrepr'),
@@ -1185,15 +1190,28 @@ def oid(self):
# which would not work if each thread has a separate id.
return oid_from(self, threads=False)
+ @property
+ def thread_oid(self):
+ """Per-thread unique identifier for this app."""
+ try:
+ return self._local.oid
+ except AttributeError:
+ self._local.oid = new_oid = oid_from(self, threads=True)
+ return new_oid
+
@cached_property
def amqp(self):
"""AMQP related functionality: :class:`~@amqp`."""
return instantiate(self.amqp_cls, app=self)
- @cached_property
+ @property
def backend(self):
"""Current backend instance."""
- return self._get_backend()
+ try:
+ return self._local.backend
+ except AttributeError:
+ self._local.backend = new_backend = self._get_backend()
+ return new_backend
@property
def conf(self):
diff --git a/celery/backends/rpc.py b/celery/backends/rpc.py
index 9b851db4de8..399c1dc7a20 100644
--- a/celery/backends/rpc.py
+++ b/celery/backends/rpc.py
@@ -338,5 +338,5 @@ def binding(self):
@cached_property
def oid(self):
- # cached here is the app OID: name of queue we receive results on.
- return self.app.oid
+ # cached here is the app thread OID: name of queue we receive results on.
+ return self.app.thread_oid
diff --git a/celery/canvas.py b/celery/canvas.py
index 0279965d2ee..a4de76428dc 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -296,7 +296,7 @@ def freeze(self, _id=None, group_id=None, chord=None,
if parent_id:
opts['parent_id'] = parent_id
if 'reply_to' not in opts:
- opts['reply_to'] = self.app.oid
+ opts['reply_to'] = self.app.thread_oid
if group_id and "group_id" not in opts:
opts['group_id'] = group_id
if chord:
diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
index a533d0cc4d4..2512b16cd4f 100644
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -2,6 +2,7 @@
import itertools
import os
import ssl
+import uuid
from copy import deepcopy
from datetime import datetime, timedelta
from pickle import dumps, loads
@@ -17,6 +18,7 @@
from celery.app import base as _appbase
from celery.app import defaults
from celery.exceptions import ImproperlyConfigured
+from celery.backends.base import Backend
from celery.loaders.base import unconfigured
from celery.platforms import pyimplementation
from celery.utils.collections import DictAttribute
@@ -987,6 +989,63 @@ class CustomCelery(type(self.app)):
app = CustomCelery(set_as_current=False)
assert isinstance(app.tasks, TaskRegistry)
+ def test_oid(self):
+ # Test that oid is a global value.
+ oid1 = self.app.oid
+ oid2 = self.app.oid
+ uuid.UUID(oid1)
+ uuid.UUID(oid2)
+ assert oid1 == oid2
+
+ def test_global_oid(self):
+ # Test that oid is a global value, also across threads
+ main_oid = self.app.oid
+ uuid.UUID(main_oid)
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: self.app.oid)
+ thread_oid = future.result()
+ uuid.UUID(thread_oid)
+ assert main_oid == thread_oid
+
+ def test_thread_oid(self):
+ # Test that thread_oid is stable within a single thread.
+ oid1 = self.app.thread_oid
+ oid2 = self.app.thread_oid
+ uuid.UUID(oid1)
+ uuid.UUID(oid2)
+ assert oid1 == oid2
+
+ def test_backend(self):
+ # Test that app.backend returns the same backend in a single thread
+ backend1 = self.app.backend
+ backend2 = self.app.backend
+ assert isinstance(backend1, Backend)
+ assert isinstance(backend2, Backend)
+ assert backend1 is backend2
+
+ def test_thread_backend(self):
+ # Test that app.backend returns a new backend for each thread
+ main_backend = self.app.backend
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: self.app.backend)
+ thread_backend = future.result()
+ assert isinstance(main_backend, Backend)
+ assert isinstance(thread_backend, Backend)
+ assert main_backend is not thread_backend
+
+ def test_thread_oid_is_local(self):
+ # Test that thread_oid is local to thread.
+ main_oid = self.app.thread_oid
+ uuid.UUID(main_oid)
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: self.app.thread_oid)
+ thread_oid = future.result()
+ uuid.UUID(thread_oid)
+ assert main_oid != thread_oid
+
class test_defaults:
diff --git a/t/unit/backends/test_rpc.py b/t/unit/backends/test_rpc.py
index f8567400706..71e573da8ff 100644
--- a/t/unit/backends/test_rpc.py
+++ b/t/unit/backends/test_rpc.py
@@ -1,3 +1,4 @@
+import uuid
from unittest.mock import Mock, patch
import pytest
@@ -28,8 +29,22 @@ def setup(self):
def test_oid(self):
oid = self.b.oid
oid2 = self.b.oid
+ assert uuid.UUID(oid)
assert oid == oid2
- assert oid == self.app.oid
+ assert oid == self.app.thread_oid
+
+ def test_oid_threads(self):
+ # Verify that two RPC backends executed in different threads
+ # have different oids.
+ oid = self.b.oid
+ from concurrent.futures import ThreadPoolExecutor
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(lambda: RPCBackend(app=self.app).oid)
+ thread_oid = future.result()
+ assert uuid.UUID(oid)
+ assert uuid.UUID(thread_oid)
+ assert oid == self.app.thread_oid
+ assert thread_oid != oid
def test_interface(self):
self.b.on_reply_declare('task_id')
diff --git a/t/unit/tasks/test_chord.py b/t/unit/tasks/test_chord.py
index e25e2ccc229..bbec557831a 100644
--- a/t/unit/tasks/test_chord.py
+++ b/t/unit/tasks/test_chord.py
@@ -1,5 +1,5 @@
from contextlib import contextmanager
-from unittest.mock import Mock, patch, sentinel
+from unittest.mock import Mock, patch, sentinel, PropertyMock
import pytest
@@ -294,9 +294,8 @@ def adds(self, sig, lazy=False):
return self.add_to_chord(sig, lazy)
self.adds = adds
+ @patch('celery.Celery.backend', new=PropertyMock(name='backend'))
def test_add_to_chord(self):
- self.app.backend = Mock(name='backend')
-
sig = self.add.s(2, 2)
sig.delay = Mock(name='sig.delay')
self.adds.request.group = uuid()
@@ -333,8 +332,8 @@ def test_add_to_chord(self):
class test_Chord_task(ChordCase):
+ @patch('celery.Celery.backend', new=PropertyMock(name='backend'))
def test_run(self):
- self.app.backend = Mock()
self.app.backend.cleanup = Mock()
self.app.backend.cleanup.__name__ = 'cleanup'
Chord = self.app.tasks['celery.chord']
diff --git a/t/unit/tasks/test_result.py b/t/unit/tasks/test_result.py
index e3d06db0f30..d16dc9eae26 100644
--- a/t/unit/tasks/test_result.py
+++ b/t/unit/tasks/test_result.py
@@ -708,19 +708,19 @@ def test_get_nested_without_native_join(self):
]),
]),
])
- ts.app.backend = backend
- vals = ts.get()
- assert vals == [
- '1.1',
- [
- '2.1',
+ with patch('celery.Celery.backend', new=backend):
+ vals = ts.get()
+ assert vals == [
+ '1.1',
[
- '3.1',
- '3.2',
- ]
- ],
- ]
+ '2.1',
+ [
+ '3.1',
+ '3.2',
+ ]
+ ],
+ ]
def test_getitem(self):
subs = [MockAsyncResultSuccess(uuid(), app=self.app),
@@ -771,15 +771,16 @@ def test_join_native(self):
results = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), results)
- ts.app.backend = backend
- backend.ids = [result.id for result in results]
- res = ts.join_native()
- assert res == list(range(10))
- callback = Mock(name='callback')
- assert not ts.join_native(callback=callback)
- callback.assert_has_calls([
- call(r.id, i) for i, r in enumerate(ts.results)
- ])
+
+ with patch('celery.Celery.backend', new=backend):
+ backend.ids = [result.id for result in results]
+ res = ts.join_native()
+ assert res == list(range(10))
+ callback = Mock(name='callback')
+ assert not ts.join_native(callback=callback)
+ callback.assert_has_calls([
+ call(r.id, i) for i, r in enumerate(ts.results)
+ ])
def test_join_native_raises(self):
ts = self.app.GroupResult(uuid(), [self.app.AsyncResult(uuid())])
@@ -813,9 +814,9 @@ def test_iter_native(self):
results = [self.app.AsyncResult(uuid(), backend=backend)
for i in range(10)]
ts = self.app.GroupResult(uuid(), results)
- ts.app.backend = backend
- backend.ids = [result.id for result in results]
- assert len(list(ts.iter_native())) == 10
+ with patch('celery.Celery.backend', new=backend):
+ backend.ids = [result.id for result in results]
+ assert len(list(ts.iter_native())) == 10
def test_join_timeout(self):
ar = MockAsyncResultSuccess(uuid(), app=self.app)
diff --git a/t/unit/test_canvas.py b/t/unit/test_canvas.py
new file mode 100644
index 00000000000..4ba7ba59f3e
--- /dev/null
+++ b/t/unit/test_canvas.py
@@ -0,0 +1,33 @@
+import uuid
+
+
+class test_Canvas:
+
+ def test_freeze_reply_to(self):
+ # Tests that Canvas.freeze() correctly
+ # creates the reply_to option
+
+ @self.app.task
+ def test_task(a, b):
+ return
+
+ s = test_task.s(2, 2)
+ s.freeze()
+
+ from concurrent.futures import ThreadPoolExecutor
+
+ def foo():
+ s = test_task.s(2, 2)
+ s.freeze()
+ return self.app.thread_oid, s.options['reply_to']
+ with ThreadPoolExecutor(max_workers=1) as executor:
+ future = executor.submit(foo)
+ t_reply_to_app, t_reply_to_opt = future.result()
+
+ assert uuid.UUID(s.options['reply_to'])
+ assert uuid.UUID(t_reply_to_opt)
+ # reply_to must be equal to thread_oid of Application
+ assert self.app.thread_oid == s.options['reply_to']
+ assert t_reply_to_app == t_reply_to_opt
+ # reply_to must be thread-relative.
+ assert t_reply_to_opt != s.options['reply_to']
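
The core pattern introduced by this patch, reduced to a standalone sketch (the class and names are illustrative, not Celery's API):

```
import threading
from concurrent.futures import ThreadPoolExecutor

class App:
    def __init__(self):
        self._local = threading.local()

    def _get_backend(self):
        return object()              # stand-in for real backend construction

    @property
    def backend(self):
        # Each thread lazily builds and caches its own instance, so
        # backend state is never shared between threads.
        try:
            return self._local.backend
        except AttributeError:
            self._local.backend = backend = self._get_backend()
            return backend

app = App()
main_backend = app.backend
with ThreadPoolExecutor(max_workers=1) as executor:
    thread_backend = executor.submit(lambda: app.backend).result()
assert main_backend is app.backend           # cached within a thread
assert main_backend is not thread_backend    # distinct per thread
```
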
From dea0bd1672cf8d0017f4dae3dfc216278637f90a Mon Sep 17 00:00:00 2001
From: Matus Valo
Date: Tue, 24 Nov 2020 00:09:53 +0100
Subject: [PATCH 123/157] Remove python2 compatibility code
---
celery/app/trace.py | 20 +++++++---------
celery/backends/base.py | 20 +++++++---------
celery/backends/cassandra.py | 9 +++----
celery/concurrency/asynpool.py | 9 +------
celery/concurrency/thread.py | 6 -----
celery/local.py | 2 --
celery/platforms.py | 19 ---------------
celery/utils/collections.py | 42 +++++----------------------------
celery/utils/imports.py | 23 +++++++-----------
t/unit/app/test_log.py | 6 +----
t/unit/backends/test_base.py | 23 +++++-------------
t/unit/backends/test_mongodb.py | 3 ---
t/unit/tasks/test_trace.py | 39 ++++++++++++------------------
t/unit/utils/test_local.py | 3 ---
t/unit/worker/test_request.py | 27 +--------------------
15 files changed, 57 insertions(+), 194 deletions(-)
diff --git a/celery/app/trace.py b/celery/app/trace.py
index bb928f2f20b..f9b8c83e6e6 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -266,18 +266,14 @@ def traceback_clear(exc=None):
else:
_, _, tb = sys.exc_info()
- if sys.version_info >= (3, 5, 0):
- while tb is not None:
- try:
- tb.tb_frame.clear()
- tb.tb_frame.f_locals
- except RuntimeError:
- # Ignore the exception raised if the frame is still executing.
- pass
- tb = tb.tb_next
-
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear()
+ while tb is not None:
+ try:
+ tb.tb_frame.clear()
+ tb.tb_frame.f_locals
+ except RuntimeError:
+ # Ignore the exception raised if the frame is still executing.
+ pass
+ tb = tb.tb_next
def build_tracer(name, task, loader=None, hostname=None, store_errors=True,
diff --git a/celery/backends/base.py b/celery/backends/base.py
index 74fce23c3c4..1aac2a0fc95 100644
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -261,18 +261,14 @@ def fail_from_current_stack(self, task_id, exc=None):
self.mark_as_failure(task_id, exc, exception_info.traceback)
return exception_info
finally:
- if sys.version_info >= (3, 5, 0):
- while tb is not None:
- try:
- tb.tb_frame.clear()
- tb.tb_frame.f_locals
- except RuntimeError:
- # Ignore the exception raised if the frame is still executing.
- pass
- tb = tb.tb_next
-
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear()
+ while tb is not None:
+ try:
+ tb.tb_frame.clear()
+ tb.tb_frame.f_locals
+ except RuntimeError:
+ # Ignore the exception raised if the frame is still executing.
+ pass
+ tb = tb.tb_next
del tb
diff --git a/celery/backends/cassandra.py b/celery/backends/cassandra.py
index 72bb33dfe9f..1220063b63c 100644
--- a/celery/backends/cassandra.py
+++ b/celery/backends/cassandra.py
@@ -1,5 +1,4 @@
"""Apache Cassandra result store backend using the DataStax driver."""
-import sys
import threading
from celery import states
@@ -60,11 +59,9 @@
USING TTL {0}
"""
-if sys.version_info[0] == 3:
- def buf_t(x):
- return bytes(x, 'utf8')
-else:
- buf_t = buffer # noqa
+
+def buf_t(x):
+ return bytes(x, 'utf8')
class CassandraBackend(BaseBackend):
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
index 7ea3eb204c9..5f17f247d62 100644
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -16,7 +16,6 @@
import gc
import os
import select
-import sys
import time
from collections import Counter, deque, namedtuple
from io import BytesIO
@@ -24,6 +23,7 @@
from pickle import HIGHEST_PROTOCOL
from time import sleep
from weakref import WeakValueDictionary, ref
+from struct import pack, unpack, unpack_from
from billiard import pool as _pool
from billiard.compat import buf_t, isblocking, setblocking
@@ -35,7 +35,6 @@
from kombu.utils.functional import fxrange
from vine import promise
-from celery.platforms import pack, unpack, unpack_from
from celery.utils.functional import noop
from celery.utils.log import get_logger
from celery.worker import state as worker_state
@@ -47,12 +46,6 @@
from _billiard import read as __read__
readcanbuf = True
- # unpack_from supports memoryview in 2.7.6 and 3.3+
- if sys.version_info[0] == 2 and sys.version_info < (2, 7, 6):
-
- def unpack_from(fmt, view, _unpack_from=unpack_from): # noqa
- return _unpack_from(fmt, view.tobytes()) # <- memoryview
-
except ImportError: # pragma: no cover
def __read__(fd, buf, size, read=os.read): # noqa
diff --git a/celery/concurrency/thread.py b/celery/concurrency/thread.py
index eb9c8683c7d..ffd2e507f11 100644
--- a/celery/concurrency/thread.py
+++ b/celery/concurrency/thread.py
@@ -1,6 +1,5 @@
"""Thread execution pool."""
-import sys
from concurrent.futures import ThreadPoolExecutor, wait
from .base import BasePool, apply_target
@@ -25,11 +24,6 @@ class TaskPool(BasePool):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
-
- # from 3.5, it is calculated from number of CPUs
- if (3, 0) <= sys.version_info < (3, 5) and self.limit is None:
- self.limit = 5
-
self.executor = ThreadPoolExecutor(max_workers=self.limit)
def on_stop(self):
diff --git a/celery/local.py b/celery/local.py
index 5fc32148ac1..f3803f40bec 100644
--- a/celery/local.py
+++ b/celery/local.py
@@ -539,8 +539,6 @@ def recreate_module(name, compat_modules=None, by_module=None, direct=None,
operator.add,
[tuple(v) for v in [compat_modules, origins, direct, attrs]],
)))
- if sys.version_info[0] < 3:
- _all = [s.encode() for s in _all]
cattrs = {
'_compat_modules': compat_modules,
'_all_by_module': by_module, '_direct': direct,
diff --git a/celery/platforms.py b/celery/platforms.py
index ebda45c49ca..452435be6ac 100644
--- a/celery/platforms.py
+++ b/celery/platforms.py
@@ -11,7 +11,6 @@
import os
import platform as _platform
import signal as _signal
-import struct
import sys
import warnings
from collections import namedtuple
@@ -797,21 +796,3 @@ def check_privileges(accept_content):
warnings.warn(RuntimeWarning(ROOT_DISCOURAGED.format(
uid=uid, euid=euid, gid=gid, egid=egid,
)))
-
-
-if sys.version_info < (2, 7, 7): # pragma: no cover
- import functools
-
- def _to_bytes_arg(fun):
- @functools.wraps(fun)
- def _inner(s, *args, **kwargs):
- return fun(s.encode(), *args, **kwargs)
- return _inner
-
- pack = _to_bytes_arg(struct.pack)
- unpack = _to_bytes_arg(struct.unpack)
- unpack_from = _to_bytes_arg(struct.unpack_from)
-else:
- pack = struct.pack
- unpack = struct.unpack
- unpack_from = struct.unpack_from
diff --git a/celery/utils/collections.py b/celery/utils/collections.py
index b9dbf826fa3..b15e122b6b7 100644
--- a/celery/utils/collections.py
+++ b/celery/utils/collections.py
@@ -1,5 +1,4 @@
"""Custom maps, sets, sequences, and other data structures."""
-import sys
import time
from collections import OrderedDict as _OrderedDict
from collections import deque
@@ -193,24 +192,9 @@ def _iterate_values(self):
yield getattr(self.obj, key)
itervalues = _iterate_values
- if sys.version_info[0] == 3: # pragma: no cover
- items = _iterate_items
- keys = _iterate_keys
- values = _iterate_values
- else:
-
- def keys(self):
- # type: () -> List[Any]
- return list(self)
-
- def items(self):
- # type: () -> List[Tuple[Any, Any]]
- return list(self._iterate_items())
-
- def values(self):
- # type: () -> List[Any]
- return list(self._iterate_values())
-
+ items = _iterate_items
+ keys = _iterate_keys
+ values = _iterate_values
MutableMapping.register(DictAttribute) # noqa: E305
@@ -360,23 +344,9 @@ def _iterate_values(self):
def bind_to(self, callback):
self._observers.append(callback)
- if sys.version_info[0] == 3: # pragma: no cover
- keys = _iterate_keys
- items = _iterate_items
- values = _iterate_values
-
- else: # noqa
- def keys(self):
- # type: () -> List[Any]
- return list(self._iterate_keys())
-
- def items(self):
- # type: () -> List[Tuple[Any, Any]]
- return list(self._iterate_items())
-
- def values(self):
- # type: () -> List[Any]
- return list(self._iterate_values())
+ keys = _iterate_keys
+ items = _iterate_items
+ values = _iterate_values
class ConfigurationView(ChainMap, AttributeDictMixin):
diff --git a/celery/utils/imports.py b/celery/utils/imports.py
index fd9009c32ac..0303bd3c051 100644
--- a/celery/utils/imports.py
+++ b/celery/utils/imports.py
@@ -25,21 +25,14 @@ class NotAPackage(Exception):
"""Raised when importing a package, but it's not a package."""
-if sys.version_info > (3, 3): # pragma: no cover
- def qualname(obj):
- """Return object name."""
- if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
- obj = obj.__class__
- q = getattr(obj, '__qualname__', None)
- if '.' not in q:
- q = '.'.join((obj.__module__, q))
- return q
-else:
- def qualname(obj): # noqa
- """Return object name."""
- if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
- obj = obj.__class__
- return '.'.join((obj.__module__, obj.__name__))
+def qualname(obj):
+ """Return object name."""
+ if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
+ obj = obj.__class__
+ q = getattr(obj, '__qualname__', None)
+ if '.' not in q:
+ q = '.'.join((obj.__module__, q))
+ return q
def instantiate(name, *args, **kwargs):
diff --git a/t/unit/app/test_log.py b/t/unit/app/test_log.py
index 453c3f26702..3793b7e8276 100644
--- a/t/unit/app/test_log.py
+++ b/t/unit/app/test_log.py
@@ -103,8 +103,6 @@ def test_formatException_bytes(self, safe_str, fe):
raise Exception()
except Exception:
assert x.formatException(sys.exc_info())
- if sys.version_info[0] == 2:
- safe_str.assert_called()
@patch('logging.Formatter.format')
def test_format_object(self, _format):
@@ -222,9 +220,7 @@ def test_setup_logger_no_handlers_stream(self):
@patch('os.fstat')
def test_setup_logger_no_handlers_file(self, *args):
tempfile = mktemp(suffix='unittest', prefix='celery')
- _open = ('builtins.open' if sys.version_info[0] == 3
- else '__builtin__.open')
- with patch(_open) as osopen:
+ with patch('builtins.open') as osopen:
with mock.restore_logging():
files = defaultdict(StringIO)
diff --git a/t/unit/backends/test_base.py b/t/unit/backends/test_base.py
index fbcda1ceb3e..0e4bb133c85 100644
--- a/t/unit/backends/test_base.py
+++ b/t/unit/backends/test_base.py
@@ -1,4 +1,3 @@
-import sys
from contextlib import contextmanager
from unittest.mock import ANY, Mock, call, patch, sentinel
@@ -258,7 +257,6 @@ def test_json_exception_arguments(self):
y = self.b.exception_to_python(x)
assert isinstance(y, Exception)
- @pytest.mark.skipif(sys.version_info < (3, 3), reason='no qualname support')
def test_json_exception_nested(self):
self.b.serializer = 'json'
x = self.b.prepare_exception(objectexception.Nested('msg'))
@@ -276,10 +274,7 @@ def test_impossible(self):
assert str(x)
y = self.b.exception_to_python(x)
assert y.__class__.__name__ == 'Impossible'
- if sys.version_info < (2, 5):
- assert y.__class__.__module__
- else:
- assert y.__class__.__module__ == 'foo.module'
+ assert y.__class__.__module__ == 'foo.module'
def test_regular(self):
self.b.serializer = 'pickle'
@@ -403,9 +398,6 @@ def test_fail_from_current_stack(self):
self.b.mark_as_failure = Mock()
frame_list = []
- if (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear = Mock()
-
def raise_dummy():
frame_str_temp = str(inspect.currentframe().__repr__)
frame_list.append(frame_str_temp)
@@ -420,14 +412,11 @@ def raise_dummy():
assert args[1] is exc
assert args[2]
- if sys.version_info >= (3, 5, 0):
- tb_ = exc.__traceback__
- while tb_ is not None:
- if str(tb_.tb_frame.__repr__) == frame_list[0]:
- assert len(tb_.tb_frame.f_locals) == 0
- tb_ = tb_.tb_next
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear.assert_called()
+ tb_ = exc.__traceback__
+ while tb_ is not None:
+ if str(tb_.tb_frame.__repr__) == frame_list[0]:
+ assert len(tb_.tb_frame.f_locals) == 0
+ tb_ = tb_.tb_next
def test_prepare_value_serializes_group_result(self):
self.b.serializer = 'json'
diff --git a/t/unit/backends/test_mongodb.py b/t/unit/backends/test_mongodb.py
index 5a391d86d30..d0e651ed37c 100644
--- a/t/unit/backends/test_mongodb.py
+++ b/t/unit/backends/test_mongodb.py
@@ -1,5 +1,4 @@
import datetime
-import sys
from pickle import dumps, loads
from unittest.mock import ANY, MagicMock, Mock, patch, sentinel
@@ -659,8 +658,6 @@ def test_encode_success_results(self, mongo_backend_factory, serializer,
backend = mongo_backend_factory(serializer=serializer)
backend.store_result(TASK_ID, result, 'SUCCESS')
recovered = backend.get_result(TASK_ID)
- if sys.version_info.major == 2 and isinstance(recovered, str):
- result_type = str # workaround for python 2 compatibility and `unicode_literals`
assert type(recovered) == result_type
assert recovered == result
diff --git a/t/unit/tasks/test_trace.py b/t/unit/tasks/test_trace.py
index e78b6aa4148..3d7061acea5 100644
--- a/t/unit/tasks/test_trace.py
+++ b/t/unit/tasks/test_trace.py
@@ -176,42 +176,33 @@ def raise_dummy():
except KeyError as exc:
traceback_clear(exc)
- if sys.version_info >= (3, 5, 0):
- tb_ = exc.__traceback__
- while tb_ is not None:
- if str(tb_.tb_frame.__repr__) == frame_list[0]:
- assert len(tb_.tb_frame.f_locals) == 0
- tb_ = tb_.tb_next
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear.assert_called()
+ tb_ = exc.__traceback__
+ while tb_ is not None:
+ if str(tb_.tb_frame.__repr__) == frame_list[0]:
+ assert len(tb_.tb_frame.f_locals) == 0
+ tb_ = tb_.tb_next
try:
raise_dummy()
except KeyError as exc:
traceback_clear()
- if sys.version_info >= (3, 5, 0):
- tb_ = exc.__traceback__
- while tb_ is not None:
- if str(tb_.tb_frame.__repr__) == frame_list[0]:
- assert len(tb_.tb_frame.f_locals) == 0
- tb_ = tb_.tb_next
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear.assert_called()
+ tb_ = exc.__traceback__
+ while tb_ is not None:
+ if str(tb_.tb_frame.__repr__) == frame_list[0]:
+ assert len(tb_.tb_frame.f_locals) == 0
+ tb_ = tb_.tb_next
try:
raise_dummy()
except KeyError as exc:
traceback_clear(str(exc))
- if sys.version_info >= (3, 5, 0):
- tb_ = exc.__traceback__
- while tb_ is not None:
- if str(tb_.tb_frame.__repr__) == frame_list[0]:
- assert len(tb_.tb_frame.f_locals) == 0
- tb_ = tb_.tb_next
- elif (2, 7, 0) <= sys.version_info < (3, 0, 0):
- sys.exc_clear.assert_called()
+ tb_ = exc.__traceback__
+ while tb_ is not None:
+ if str(tb_.tb_frame.__repr__) == frame_list[0]:
+ assert len(tb_.tb_frame.f_locals) == 0
+ tb_ = tb_.tb_next
@patch('celery.app.trace.traceback_clear')
def test_when_Ignore(self, mock_traceback_clear):
diff --git a/t/unit/utils/test_local.py b/t/unit/utils/test_local.py
index a10accf086d..621a77595b2 100644
--- a/t/unit/utils/test_local.py
+++ b/t/unit/utils/test_local.py
@@ -1,4 +1,3 @@
-import sys
from unittest.mock import Mock
import pytest
@@ -143,8 +142,6 @@ def test_listproxy(self):
x[0:2] = [1, 2]
del(x[0:2])
assert str(x)
- if sys.version_info[0] < 3:
- assert x.__cmp__(object()) == -1
def test_complex_cast(self):
diff --git a/t/unit/worker/test_request.py b/t/unit/worker/test_request.py
index d63ccbb1147..c0d0119d9b8 100644
--- a/t/unit/worker/test_request.py
+++ b/t/unit/worker/test_request.py
@@ -2,15 +2,13 @@
import os
import signal
import socket
-import sys
from datetime import datetime, timedelta
from time import monotonic, time
from unittest.mock import Mock, patch
import pytest
from billiard.einfo import ExceptionInfo
-from kombu.utils.encoding import (default_encode, from_utf8, safe_repr,
- safe_str)
+from kombu.utils.encoding import from_utf8, safe_repr, safe_str
from kombu.utils.uuid import uuid
from celery import states
@@ -99,29 +97,6 @@ def jail(app, task_id, name, args, kwargs):
).retval
-@pytest.mark.skipif(sys.version_info[0] > 3, reason='Py2 only')
-class test_default_encode:
-
- def test_jython(self):
- prev, sys.platform = sys.platform, 'java 1.6.1'
- try:
- assert default_encode(b'foo') == b'foo'
- finally:
- sys.platform = prev
-
- def test_cpython(self):
- prev, sys.platform = sys.platform, 'darwin'
- gfe, sys.getfilesystemencoding = (
- sys.getfilesystemencoding,
- lambda: 'utf-8',
- )
- try:
- assert default_encode(b'foo') == b'foo'
- finally:
- sys.platform = prev
- sys.getfilesystemencoding = gfe
-
-
class test_Retry:
def test_retry_semipredicate(self):
From 2cc4d999106f573802c14a15a22fac6dfd8e781e Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 25 Nov 2020 16:02:48 +0200
Subject: [PATCH 124/157] Restore ability to extend the CLI with new
sub-commands.
---
celery/bin/celery.py | 3 +++
requirements/default.txt | 1 +
2 files changed, 4 insertions(+)
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
index 6626c21fa64..095766c0f4d 100644
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -7,6 +7,8 @@
import click.exceptions
from click.types import ParamType
from click_didyoumean import DYMGroup
+from click_plugins import with_plugins
+from pkg_resources import iter_entry_points
from celery import VERSION_BANNER
from celery.app.utils import find_app
@@ -69,6 +71,7 @@ def convert(self, value, param, ctx):
APP = App()
+@with_plugins(iter_entry_points('celery.commands'))
@click.group(cls=DYMGroup, invoke_without_command=True)
@click.option('-A',
'--app',
diff --git a/requirements/default.txt b/requirements/default.txt
index 124c56679da..3eafbb470f5 100644
--- a/requirements/default.txt
+++ b/requirements/default.txt
@@ -5,3 +5,4 @@ vine>=5.0.0,<6.0
click>=7.0
click-didyoumean>=0.0.3
click-repl>=0.1.6
+click-plugins>=1.1.1
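With ``click-plugins`` wired in, any installed package can contribute a
sub-command to the :program:`celery` umbrella command by registering a click
command under the ``celery.commands`` entry-point group. A minimal sketch of
such a plugin (package, command, and option names are hypothetical):

.. code-block:: python

    # my_plugin/commands.py -- hypothetical third-party package
    import click

    @click.command()
    @click.option('--loud', is_flag=True, help='Shout the greeting.')
    def hello(loud):
        """Invoked as: celery hello [--loud]."""
        msg = 'hello from a plugin'
        click.echo(msg.upper() if loud else msg)

    # In the package's setup.py, register the command so that
    # iter_entry_points('celery.commands') can discover it:
    #
    #     entry_points={
    #         'celery.commands': ['hello = my_plugin.commands:hello'],
    #     }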
From 07000d826573a97ff633b688bda7bf30db114dfe Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 25 Nov 2020 16:56:42 +0200
Subject: [PATCH 125/157] Adjust documentation to demonstrate how to introduce
sub-command plugins in 5.x.
Fixes #6439.
---
docs/userguide/extending.rst | 39 +++++++++++++-----------------------
1 file changed, 14 insertions(+), 25 deletions(-)
diff --git a/docs/userguide/extending.rst b/docs/userguide/extending.rst
index 969eb72a51c..21ff68ecd2a 100644
--- a/docs/userguide/extending.rst
+++ b/docs/userguide/extending.rst
@@ -816,12 +816,10 @@ Entry-points is special meta-data that can be added to your packages ``setup.py`
and then after installation, read from the system using the :mod:`pkg_resources` module.
Celery recognizes ``celery.commands`` entry-points to install additional
-sub-commands, where the value of the entry-point must point to a valid subclass
-of :class:`celery.bin.base.Command`. There's limited documentation,
-unfortunately, but you can find inspiration from the various commands in the
-:mod:`celery.bin` package.
+sub-commands, where the value of the entry-point must point to a valid click
+command.
-This is how the :pypi:`Flower` monitoring extension adds the :program:`celery flower` command,
+This is how the :pypi:`Flower` monitoring extension may add the :program:`celery flower` command,
by adding an entry-point in :file:`setup.py`:
.. code-block:: python
@@ -830,44 +828,35 @@ by adding an entry-point in :file:`setup.py`:
name='flower',
entry_points={
'celery.commands': [
- 'flower = flower.command:FlowerCommand',
+ 'flower = flower.command:flower',
],
}
)
The command definition is in two parts separated by the equal sign, where the
first part is the name of the sub-command (flower), and the second part is
-the fully qualified symbol path to the class that implements the command:
+the fully qualified symbol path to the function that implements the command:
.. code-block:: text
- flower.command:FlowerCommand
+ flower.command:flower
The module path and the name of the attribute should be separated by a colon,
as shown above.
-In the module :file:`flower/command.py`, the command class is defined
-something like this:
+In the module :file:`flower/command.py`, the command function may be defined
+as follows:
.. code-block:: python
- from celery.bin.base import Command
+ import click
-
- class FlowerCommand(Command):
-
- def add_arguments(self, parser):
- parser.add_argument(
- '--port', default=8888, type='int',
- help='Webserver port',
- ),
- parser.add_argument(
- '--debug', action='store_true',
- )
-
- def run(self, port=None, debug=False, **kwargs):
- print('Running our command')
+ @click.command()
+ @click.option('--port', default=8888, type=int, help='Webserver port')
+ @click.option('--debug', is_flag=True)
+ def flower(port, debug):
+ print('Running our command')
Worker API
From 681e72edb918c8ff315665a6abbfc6dd99f303e2 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 26 Nov 2020 11:41:40 +0200
Subject: [PATCH 126/157] autopep8 & isort.
---
celery/concurrency/asynpool.py | 2 +-
celery/utils/collections.py | 1 +
t/unit/app/test_app.py | 2 +-
t/unit/tasks/test_chord.py | 2 +-
4 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/celery/concurrency/asynpool.py b/celery/concurrency/asynpool.py
index 5f17f247d62..f4d1c475a8e 100644
--- a/celery/concurrency/asynpool.py
+++ b/celery/concurrency/asynpool.py
@@ -21,9 +21,9 @@
from io import BytesIO
from numbers import Integral
from pickle import HIGHEST_PROTOCOL
+from struct import pack, unpack, unpack_from
from time import sleep
from weakref import WeakValueDictionary, ref
-from struct import pack, unpack, unpack_from
from billiard import pool as _pool
from billiard.compat import buf_t, isblocking, setblocking
diff --git a/celery/utils/collections.py b/celery/utils/collections.py
index b15e122b6b7..f19014c2dca 100644
--- a/celery/utils/collections.py
+++ b/celery/utils/collections.py
@@ -196,6 +196,7 @@ def _iterate_values(self):
keys = _iterate_keys
values = _iterate_values
+
MutableMapping.register(DictAttribute) # noqa: E305
diff --git a/t/unit/app/test_app.py b/t/unit/app/test_app.py
index 2512b16cd4f..5178cbdf59b 100644
--- a/t/unit/app/test_app.py
+++ b/t/unit/app/test_app.py
@@ -17,8 +17,8 @@
from celery import current_app, shared_task
from celery.app import base as _appbase
from celery.app import defaults
-from celery.exceptions import ImproperlyConfigured
from celery.backends.base import Backend
+from celery.exceptions import ImproperlyConfigured
from celery.loaders.base import unconfigured
from celery.platforms import pyimplementation
from celery.utils.collections import DictAttribute
diff --git a/t/unit/tasks/test_chord.py b/t/unit/tasks/test_chord.py
index bbec557831a..f4e03a0e130 100644
--- a/t/unit/tasks/test_chord.py
+++ b/t/unit/tasks/test_chord.py
@@ -1,5 +1,5 @@
from contextlib import contextmanager
-from unittest.mock import Mock, patch, sentinel, PropertyMock
+from unittest.mock import Mock, PropertyMock, patch, sentinel
import pytest
From ffacfe3e384554d1eeaaeb84a4b8e45171122b18 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 26 Nov 2020 11:55:53 +0200
Subject: [PATCH 127/157] Linters now run using Python 3.9.
---
.travis.yml | 2 +-
tox.ini | 3 +--
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index 3c532ee95de..316d206de11 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -59,7 +59,7 @@ jobs:
env: MATRIX_TOXENV=integration-elasticsearch
stage: integration
- - python: '3.8'
+ - python: '3.9'
env:
- TOXENV=flake8,apicheck,configcheck,bandit
- CELERY_TOX_PARALLEL='--parallel --parallel-live'
diff --git a/tox.ini b/tox.ini
index 8ec20b7a007..efdfa1c56be 100644
--- a/tox.ini
+++ b/tox.ini
@@ -65,8 +65,7 @@ basepython =
3.8: python3.8
3.9: python3.9
pypy3: pypy3
- flake8,apicheck,linkcheck,configcheck,bandit: python3.8
- flakeplus: python2.7
+ flake8,apicheck,linkcheck,configcheck,bandit: python3.9
usedevelop = True
[testenv:apicheck]
From 6d4b6cbb61bf19695f1a64774d4e67368a7a6af7 Mon Sep 17 00:00:00 2001
From: Matus Valo
Date: Fri, 27 Nov 2020 16:46:26 +0100
Subject: [PATCH 128/157] Fix apply_async() in Calling Tasks userguide
---
docs/userguide/calling.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/userguide/calling.rst b/docs/userguide/calling.rst
index 811820b44a1..363e8f2a9a8 100644
--- a/docs/userguide/calling.rst
+++ b/docs/userguide/calling.rst
@@ -711,13 +711,13 @@ setting or by using the ``ignore_result`` option:
.. code-block:: pycon
- >>> result = add.apply_async(1, 2, ignore_result=True)
+ >>> result = add.apply_async((1, 2), ignore_result=True)
>>> result.get()
None
>>> # Do not ignore result (default)
...
- >>> result = add.apply_async(1, 2, ignore_result=False)
+ >>> result = add.apply_async((1, 2), ignore_result=False)
>>> result.get()
3
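The fix matters because ``apply_async`` takes the positional arguments as a
single tuple, while ``delay`` mirrors the task signature directly. A quick
side-by-side, assuming the ``add`` task from the guide:

.. code-block:: python

    add.delay(1, 2)                                # like a plain call: add(1, 2)
    add.apply_async((1, 2))                        # args packed into a tuple
    add.apply_async((1, 2), ignore_result=True)    # execution options as kwargs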
From 5529c33ed14520341d3ea7929e2722a7066e7509 Mon Sep 17 00:00:00 2001
From: henribru <6639509+henribru@users.noreply.github.com>
Date: Sun, 29 Nov 2020 15:31:17 +0100
Subject: [PATCH 129/157] Fix dead links in contributing guide (#6506)
---
CONTRIBUTING.rst | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 9814b9c7ee4..e869a4f45fe 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -40,7 +40,7 @@ The Code of Conduct is heavily based on the `Ubuntu Code of Conduct`_, and
the `Pylons Code of Conduct`_.
.. _`Ubuntu Code of Conduct`: https://www.ubuntu.com/community/conduct
-.. _`Pylons Code of Conduct`: http://docs.pylonshq.com/community/conduct.html
+.. _`Pylons Code of Conduct`: https://pylonsproject.org/community-code-of-conduct.html
Be considerate
--------------
@@ -447,7 +447,7 @@ fetch and checkout a remote branch like this::
.. _`Fork a Repo`: https://help.github.com/fork-a-repo/
.. _`Rebasing merge commits in git`:
- https://notes.envato.com/developers/rebasing-merge-commits-in-git/
+ https://web.archive.org/web/20150627054345/http://marketblog.envato.com/general/rebasing-merge-commits-in-git/
.. _`Rebase`: https://help.github.com/rebase/
.. _contributing-docker-development:
From 443ef65248fa2f4cd0931119ce4d5942aa7b2b4b Mon Sep 17 00:00:00 2001
From: henribru <6639509+henribru@users.noreply.github.com>
Date: Mon, 30 Nov 2020 04:17:33 +0100
Subject: [PATCH 130/157] Fix inconsistency in documentation for `link_error`
(#6505)
* Make documentation of link_error consistent
Fixes #4099
* Fix undefined variable in example
* Add to contributors list
---
CONTRIBUTORS.txt | 1 +
docs/userguide/calling.rst | 15 +++++----------
docs/userguide/canvas.rst | 2 +-
3 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index a29157e1e57..2e27e625d43 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -278,3 +278,4 @@ Dipankar Achinta, 2019/10/24
Sardorbek Imomaliev, 2020/01/24
Maksym Shalenyi, 2020/07/30
Frazer McLean, 2020/09/29
+Henrik Bruåsdal, 2020/11/29
diff --git a/docs/userguide/calling.rst b/docs/userguide/calling.rst
index 363e8f2a9a8..efeb1bb6c13 100644
--- a/docs/userguide/calling.rst
+++ b/docs/userguide/calling.rst
@@ -135,23 +135,18 @@ task that adds 16 to the previous result, forming the expression
You can also cause a callback to be applied if task raises an exception
-(*errback*), but this behaves differently from a regular callback
-in that it will be passed the id of the parent task, not the result.
-This is because it may not always be possible to serialize
-the exception raised, and so this way the error callback requires
-a result backend to be enabled, and the task must retrieve the result
-of the task instead.
+(*errback*). The worker won't actually call the errback as a task, but will
+instead call the errback function directly so that the raw request, exception
+and traceback objects can be passed to it.
This is an example error callback:
.. code-block:: python
@app.task
- def error_handler(uuid):
- result = AsyncResult(uuid)
- exc = result.get(propagate=False)
+ def error_handler(request, exc, traceback):
print('Task {0} raised exception: {1!r}\n{2!r}'.format(
- uuid, exc, result.traceback))
+ request.id, exc, traceback))
it can be added to the task using the ``link_error`` execution
option:
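A usage sketch, reusing the names from the snippet above (``add`` as in the
guide's earlier examples):

.. code-block:: python

    add.apply_async((2, 2), link_error=error_handler.s())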
diff --git a/docs/userguide/canvas.rst b/docs/userguide/canvas.rst
index 10240768435..67c42ba583c 100644
--- a/docs/userguide/canvas.rst
+++ b/docs/userguide/canvas.rst
@@ -569,7 +569,7 @@ Here's an example errback:
def log_error(request, exc, traceback):
with open(os.path.join('/var/errors', request.id), 'a') as fh:
print('--\n\n{0} {1} {2}'.format(
- task_id, exc, traceback), file=fh)
+ request.id, exc, traceback), file=fh)
To make it even easier to link tasks together there's
a special signature called :class:`~celery.chain` that lets
From ee13eae8e20896beadd89dec8f521bd781522416 Mon Sep 17 00:00:00 2001
From: Stuart Axon
Date: Mon, 30 Nov 2020 13:10:46 +0000
Subject: [PATCH 131/157] Update testing.rst (#6507)
Use double backticks for some code examples, so that quotes don't get converted into smart quotes.
https://github.com/celery/celery/issues/6497
---
docs/userguide/testing.rst | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/docs/userguide/testing.rst b/docs/userguide/testing.rst
index 330a24d1dc2..1df28b21978 100644
--- a/docs/userguide/testing.rst
+++ b/docs/userguide/testing.rst
@@ -103,10 +103,10 @@ Enabling
Celery initially ships the plugin in a disabled state, to enable it you can either:
- * `pip install celery[pytest]`
- * `pip install pytest-celery`
- * or add an environment variable `PYTEST_PLUGINS=celery.contrib.pytest`
- * or add `pytest_plugins = ("celery.contrib.pytest", )` to your root conftest.py
+ * ``pip install celery[pytest]``
+ * ``pip install pytest-celery``
+ * or add an environment variable ``PYTEST_PLUGINS=celery.contrib.pytest``
+ * or add ``pytest_plugins = ("celery.contrib.pytest", )`` to your root conftest.py
Marks
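For reference, the ``conftest.py`` route mentioned in the list above is a
one-liner; a minimal sketch:

.. code-block:: python

    # conftest.py at the project root
    pytest_plugins = ("celery.contrib.pytest",)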
From 208e90e40f4aa3bfd5bc75600af9d1ed4e1efa28 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 2 Dec 2020 12:46:44 +0200
Subject: [PATCH 132/157] Don't upgrade click to 8.x since click-repl doesn't
support it yet.
Fixes #6511.
Upstream issue: https://github.com/click-contrib/click-repl/issues/72
---
requirements/default.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements/default.txt b/requirements/default.txt
index 3eafbb470f5..33c3b6be9f8 100644
--- a/requirements/default.txt
+++ b/requirements/default.txt
@@ -2,7 +2,7 @@ pytz>dev
billiard>=3.6.3.0,<4.0
kombu>=5.0.0,<6.0
vine>=5.0.0,<6.0
-click>=7.0
+click>=7.0,<8.0
click-didyoumean>=0.0.3
click-repl>=0.1.6
click-plugins>=1.1.1
From 3a81c267f9ebc54b39be932607041bc77ece5857 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 2 Dec 2020 15:08:10 +0200
Subject: [PATCH 133/157] Update documentation on changes to custom CLI options
in 5.0.
Fixes #6380.
---
docs/conf.py | 1 +
docs/userguide/extending.rst | 17 +++++++----------
docs/whatsnew-5.0.rst | 2 ++
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/docs/conf.py b/docs/conf.py
index 6c7dbc6aaad..6cc0f92fe64 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -23,6 +23,7 @@
],
extra_intersphinx_mapping={
'cyanide': ('https://cyanide.readthedocs.io/en/latest', None),
+ 'click': ('https://click.palletsprojects.com/en/7.x/', None),
},
apicheck_ignore_modules=[
'celery.__main__',
diff --git a/docs/userguide/extending.rst b/docs/userguide/extending.rst
index 21ff68ecd2a..443255e1789 100644
--- a/docs/userguide/extending.rst
+++ b/docs/userguide/extending.rst
@@ -729,25 +729,22 @@ You can add additional command-line options to the ``worker``, ``beat``, and
``events`` commands by modifying the :attr:`~@user_options` attribute of the
application instance.
-Celery commands uses the :mod:`argparse` module to parse command-line
-arguments, and so to add custom arguments you need to specify a callback
-that takes a :class:`argparse.ArgumentParser` instance - and adds arguments.
-Please see the :mod:`argparse` documentation to read about the fields supported.
+Celery commands use the :mod:`click` module to parse command-line
+arguments, and so to add custom arguments you need to add :class:`click.Option` instances
+to the relevant set.
Example adding a custom option to the :program:`celery worker` command:
.. code-block:: python
from celery import Celery
+ from click import Option
app = Celery(broker='amqp://')
- def add_worker_arguments(parser):
- parser.add_argument(
- '--enable-my-option', action='store_true', default=False,
- help='Enable custom option.',
- ),
- app.user_options['worker'].add(add_worker_arguments)
+ app.user_options['worker'].add(Option(('--enable-my-option',),
+ is_flag=True,
+ help='Enable custom option.'))
All bootsteps will now receive this argument as a keyword argument to
diff --git a/docs/whatsnew-5.0.rst b/docs/whatsnew-5.0.rst
index 3f93ce3e979..7e38c924a13 100644
--- a/docs/whatsnew-5.0.rst
+++ b/docs/whatsnew-5.0.rst
@@ -275,6 +275,8 @@ As a result a few breaking changes has been introduced:
- :program:`celery amqp` and :program:`celery shell` require the `repl`
sub command to start a shell. You can now also invoke specific commands
without a shell. Type `celery amqp --help` or `celery shell --help` for details.
+- The API for adding user options has changed.
+ Refer to the :ref:`documentation ` for details.
Click provides shell completion `out of the box `_.
This functionality replaces our previous bash completion script and adds
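Since the documentation hunk stops short of showing where the new option value
ends up, here is a sketch of a bootstep consuming it, following the pattern
used elsewhere in the extending guide (the bootstep name is hypothetical):

.. code-block:: python

    from celery import Celery, bootsteps
    from click import Option

    app = Celery(broker='amqp://')
    app.user_options['worker'].add(Option(('--enable-my-option',),
                                          is_flag=True,
                                          help='Enable custom option.'))

    class MyBootstep(bootsteps.Step):
        """Receives the custom option as a keyword argument."""

        def __init__(self, parent, enable_my_option=False, **options):
            super().__init__(parent, **options)
            if enable_my_option:
                print('custom option enabled')

    app.steps['worker'].add(MyBootstep)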
From 1c076a646ec04b9c920ff75b79a3911096da2838 Mon Sep 17 00:00:00 2001
From: Sonya Chhabra
Date: Wed, 2 Dec 2020 20:16:52 -0500
Subject: [PATCH 134/157] update step to install homebrew
---
docs/getting-started/brokers/rabbitmq.rst | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/getting-started/brokers/rabbitmq.rst b/docs/getting-started/brokers/rabbitmq.rst
index 6f5d95dd8ab..430844bdfec 100644
--- a/docs/getting-started/brokers/rabbitmq.rst
+++ b/docs/getting-started/brokers/rabbitmq.rst
@@ -86,7 +86,7 @@ documentation`_:
.. code-block:: console
- ruby -e "$(curl -fsSL https://raw.github.com/Homebrew/homebrew/go/install)"
+ /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
Finally, we can install RabbitMQ using :command:`brew`:
From 18a0963ed36f87b8fb884ad27cfc2b7f1ca9f53c Mon Sep 17 00:00:00 2001
From: AbdealiJK
Date: Tue, 10 Nov 2020 10:21:49 +0530
Subject: [PATCH 135/157] redis: Support Sentinel with SSL
Use the SentinelManagedSSLConnection when SSL is enabled for the
transport. The redis-py project doesn't have a connection class for
Sentinel over SSL yet, so we add one in celery/backends/redis.py to
provide that functionality.
---
celery/backends/redis.py | 20 ++++++++++++++++++--
t/unit/backends/test_redis.py | 31 +++++++++++++++++++++++++++++++
2 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
index dd3677f569c..6820047c752 100644
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -185,6 +185,7 @@ class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin):
#: :pypi:`redis` client module.
redis = redis
+ connection_class_ssl = redis.SSLConnection if redis else None
#: Maximum number of connections in the pool.
max_connections = None
@@ -236,7 +237,7 @@ def __init__(self, host=None, port=None, db=None, password=None,
ssl = _get('redis_backend_use_ssl')
if ssl:
self.connparams.update(ssl)
- self.connparams['connection_class'] = redis.SSLConnection
+ self.connparams['connection_class'] = self.connection_class_ssl
if url:
self.connparams = self._params_from_url(url, self.connparams)
@@ -245,7 +246,7 @@ def __init__(self, host=None, port=None, db=None, password=None,
# redis_backend_use_ssl dict, check ssl_cert_reqs is valid. If set
# via query string ssl_cert_reqs will be a string so convert it here
if ('connection_class' in self.connparams and
- self.connparams['connection_class'] is redis.SSLConnection):
+ issubclass(self.connparams['connection_class'], redis.SSLConnection)):
ssl_cert_reqs_missing = 'MISSING'
ssl_string_to_constant = {'CERT_REQUIRED': CERT_REQUIRED,
'CERT_OPTIONAL': CERT_OPTIONAL,
@@ -535,10 +536,25 @@ def __reduce__(self, args=(), kwargs=None):
)
+if getattr(redis, "sentinel", None):
+ class SentinelManagedSSLConnection(
+ redis.sentinel.SentinelManagedConnection,
+ redis.SSLConnection):
+ """Connect to a Redis server using Sentinel + TLS.
+
+ Use Sentinel to identify which Redis server is the current master
+ to connect to and when connecting to the Master server, use an
+ SSL Connection.
+ """
+
+ pass
+
+
class SentinelBackend(RedisBackend):
"""Redis sentinel task result store."""
sentinel = getattr(redis, "sentinel", None)
+ connection_class_ssl = SentinelManagedSSLConnection if sentinel else None
def __init__(self, *args, **kwargs):
if self.sentinel is None:
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
index f534077a4fd..3bacc5fcc67 100644
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -1126,3 +1126,34 @@ def test_get_pool(self):
)
pool = x._get_pool(**x.connparams)
assert pool
+
+ def test_backend_ssl(self):
+ pytest.importorskip('redis')
+
+ from celery.backends.redis import SentinelBackend
+ self.app.conf.redis_backend_use_ssl = {
+ 'ssl_cert_reqs': "CERT_REQUIRED",
+ 'ssl_ca_certs': '/path/to/ca.crt',
+ 'ssl_certfile': '/path/to/client.crt',
+ 'ssl_keyfile': '/path/to/client.key',
+ }
+ self.app.conf.redis_socket_timeout = 30.0
+ self.app.conf.redis_socket_connect_timeout = 100.0
+ x = SentinelBackend(
+ 'sentinel://:bosco@vandelay.com:123//1', app=self.app,
+ )
+ assert x.connparams
+ assert len(x.connparams['hosts']) == 1
+ assert x.connparams['hosts'][0]['host'] == 'vandelay.com'
+ assert x.connparams['hosts'][0]['db'] == 1
+ assert x.connparams['hosts'][0]['port'] == 123
+ assert x.connparams['hosts'][0]['password'] == 'bosco'
+ assert x.connparams['socket_timeout'] == 30.0
+ assert x.connparams['socket_connect_timeout'] == 100.0
+ assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED
+ assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt'
+ assert x.connparams['ssl_certfile'] == '/path/to/client.crt'
+ assert x.connparams['ssl_keyfile'] == '/path/to/client.key'
+
+ from celery.backends.redis import SentinelManagedSSLConnection
+ assert x.connparams['connection_class'] is SentinelManagedSSLConnection
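From the configuration side, the feature is exercised the same way as plain
Redis TLS; a hedged sketch (host names, master name, and paths are
placeholders):

.. code-block:: python

    app.conf.result_backend = 'sentinel://:bosco@sentinel-host:26379/1'
    app.conf.result_backend_transport_options = {'master_name': 'mymaster'}
    app.conf.redis_backend_use_ssl = {
        'ssl_cert_reqs': 'CERT_REQUIRED',
        'ssl_ca_certs': '/path/to/ca.crt',
        'ssl_certfile': '/path/to/client.crt',
        'ssl_keyfile': '/path/to/client.key',
    }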
From 0fa4db8889325fd774f7e89ebb219a87fc1d8cfb Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 3 Dec 2020 17:54:32 +0200
Subject: [PATCH 136/157] Revert "redis: Support Sentinel with SSL" (#6518)
This reverts commit 18a0963ed36f87b8fb884ad27cfc2b7f1ca9f53c.
---
celery/backends/redis.py | 20 ++------------------
t/unit/backends/test_redis.py | 31 -------------------------------
2 files changed, 2 insertions(+), 49 deletions(-)
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
index 6820047c752..dd3677f569c 100644
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -185,7 +185,6 @@ class RedisBackend(BaseKeyValueStoreBackend, AsyncBackendMixin):
#: :pypi:`redis` client module.
redis = redis
- connection_class_ssl = redis.SSLConnection if redis else None
#: Maximum number of connections in the pool.
max_connections = None
@@ -237,7 +236,7 @@ def __init__(self, host=None, port=None, db=None, password=None,
ssl = _get('redis_backend_use_ssl')
if ssl:
self.connparams.update(ssl)
- self.connparams['connection_class'] = self.connection_class_ssl
+ self.connparams['connection_class'] = redis.SSLConnection
if url:
self.connparams = self._params_from_url(url, self.connparams)
@@ -246,7 +245,7 @@ def __init__(self, host=None, port=None, db=None, password=None,
# redis_backend_use_ssl dict, check ssl_cert_reqs is valid. If set
# via query string ssl_cert_reqs will be a string so convert it here
if ('connection_class' in self.connparams and
- issubclass(self.connparams['connection_class'], redis.SSLConnection)):
+ self.connparams['connection_class'] is redis.SSLConnection):
ssl_cert_reqs_missing = 'MISSING'
ssl_string_to_constant = {'CERT_REQUIRED': CERT_REQUIRED,
'CERT_OPTIONAL': CERT_OPTIONAL,
@@ -536,25 +535,10 @@ def __reduce__(self, args=(), kwargs=None):
)
-if getattr(redis, "sentinel", None):
- class SentinelManagedSSLConnection(
- redis.sentinel.SentinelManagedConnection,
- redis.SSLConnection):
- """Connect to a Redis server using Sentinel + TLS.
-
- Use Sentinel to identify which Redis server is the current master
- to connect to and when connecting to the Master server, use an
- SSL Connection.
- """
-
- pass
-
-
class SentinelBackend(RedisBackend):
"""Redis sentinel task result store."""
sentinel = getattr(redis, "sentinel", None)
- connection_class_ssl = SentinelManagedSSLConnection if sentinel else None
def __init__(self, *args, **kwargs):
if self.sentinel is None:
diff --git a/t/unit/backends/test_redis.py b/t/unit/backends/test_redis.py
index 3bacc5fcc67..f534077a4fd 100644
--- a/t/unit/backends/test_redis.py
+++ b/t/unit/backends/test_redis.py
@@ -1126,34 +1126,3 @@ def test_get_pool(self):
)
pool = x._get_pool(**x.connparams)
assert pool
-
- def test_backend_ssl(self):
- pytest.importorskip('redis')
-
- from celery.backends.redis import SentinelBackend
- self.app.conf.redis_backend_use_ssl = {
- 'ssl_cert_reqs': "CERT_REQUIRED",
- 'ssl_ca_certs': '/path/to/ca.crt',
- 'ssl_certfile': '/path/to/client.crt',
- 'ssl_keyfile': '/path/to/client.key',
- }
- self.app.conf.redis_socket_timeout = 30.0
- self.app.conf.redis_socket_connect_timeout = 100.0
- x = SentinelBackend(
- 'sentinel://:bosco@vandelay.com:123//1', app=self.app,
- )
- assert x.connparams
- assert len(x.connparams['hosts']) == 1
- assert x.connparams['hosts'][0]['host'] == 'vandelay.com'
- assert x.connparams['hosts'][0]['db'] == 1
- assert x.connparams['hosts'][0]['port'] == 123
- assert x.connparams['hosts'][0]['password'] == 'bosco'
- assert x.connparams['socket_timeout'] == 30.0
- assert x.connparams['socket_connect_timeout'] == 100.0
- assert x.connparams['ssl_cert_reqs'] == ssl.CERT_REQUIRED
- assert x.connparams['ssl_ca_certs'] == '/path/to/ca.crt'
- assert x.connparams['ssl_certfile'] == '/path/to/client.crt'
- assert x.connparams['ssl_keyfile'] == '/path/to/client.key'
-
- from celery.backends.redis import SentinelManagedSSLConnection
- assert x.connparams['connection_class'] is SentinelManagedSSLConnection
From 4fad9072ff4eca154ab0ac0b76f3a54fd7e738fe Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 3 Dec 2020 18:16:10 +0200
Subject: [PATCH 137/157] Reintroduce support for custom preload options
(#6516)
* Restore preload options.
Fixes #6307.
* Document breaking changes for preload options in 5.0.
Fixes #6379.
---
celery/bin/amqp.py | 3 +++
celery/bin/base.py | 21 +++++++++++++++++++++
celery/bin/beat.py | 4 +++-
celery/bin/call.py | 5 +++--
celery/bin/celery.py | 3 +++
celery/bin/control.py | 6 +++++-
celery/bin/events.py | 5 ++++-
celery/bin/graph.py | 3 ++-
celery/bin/list.py | 3 ++-
celery/bin/logtool.py | 3 ++-
celery/bin/migrate.py | 4 +++-
celery/bin/multi.py | 3 ++-
celery/bin/purge.py | 4 +++-
celery/bin/result.py | 4 +++-
celery/bin/shell.py | 4 +++-
celery/bin/upgrade.py | 4 +++-
celery/bin/worker.py | 4 +++-
docs/userguide/extending.rst | 19 ++++++-------------
18 files changed, 74 insertions(+), 28 deletions(-)
diff --git a/celery/bin/amqp.py b/celery/bin/amqp.py
index e8b7f24066c..ab8ab5f0100 100644
--- a/celery/bin/amqp.py
+++ b/celery/bin/amqp.py
@@ -8,6 +8,8 @@
__all__ = ('amqp',)
+from celery.bin.base import handle_preload_options
+
def dump_message(message):
if message is None:
@@ -54,6 +56,7 @@ def reconnect(self):
@click.group(invoke_without_command=True)
@click.pass_context
+@handle_preload_options
def amqp(ctx):
"""AMQP Administration Shell.
diff --git a/celery/bin/base.py b/celery/bin/base.py
index 52f94382c65..78d6371b420 100644
--- a/celery/bin/base.py
+++ b/celery/bin/base.py
@@ -1,6 +1,7 @@
"""Click customizations for Celery."""
import json
from collections import OrderedDict
+from functools import update_wrapper
from pprint import pformat
import click
@@ -8,6 +9,7 @@
from kombu.utils.objects import cached_property
from celery._state import get_current_app
+from celery.signals import user_preload_options
from celery.utils import text
from celery.utils.log import mlevel
from celery.utils.time import maybe_iso8601
@@ -113,6 +115,25 @@ def say_chat(self, direction, title, body='', show_body=False):
self.echo(body)
+def handle_preload_options(f):
+ def caller(ctx, *args, **kwargs):
+ app = ctx.obj.app
+
+ preload_options = [o.name for o in app.user_options.get('preload', [])]
+
+ if preload_options:
+ user_options = {
+ preload_option: kwargs[preload_option]
+ for preload_option in preload_options
+ }
+
+ user_preload_options.send(sender=f, app=app, options=user_options)
+
+ return f(ctx, *args, **kwargs)
+
+ return update_wrapper(caller, f)
+
+
class CeleryOption(click.Option):
"""Customized option for Celery."""
diff --git a/celery/bin/beat.py b/celery/bin/beat.py
index 54a74c14c7e..145b44e9720 100644
--- a/celery/bin/beat.py
+++ b/celery/bin/beat.py
@@ -3,7 +3,8 @@
import click
-from celery.bin.base import LOG_LEVEL, CeleryDaemonCommand, CeleryOption
+from celery.bin.base import (LOG_LEVEL, CeleryDaemonCommand, CeleryOption,
+ handle_preload_options)
from celery.platforms import detached, maybe_drop_privileges
@@ -43,6 +44,7 @@
help_group="Beat Options",
help="Logging level.")
@click.pass_context
+@handle_preload_options
def beat(ctx, detach=False, logfile=None, pidfile=None, uid=None,
gid=None, umask=None, workdir=None, **kwargs):
"""Start the beat periodic task scheduler."""
diff --git a/celery/bin/call.py b/celery/bin/call.py
index c2744a4cd28..35ca34e3f33 100644
--- a/celery/bin/call.py
+++ b/celery/bin/call.py
@@ -2,9 +2,10 @@
import click
from celery.bin.base import (ISO8601, ISO8601_OR_FLOAT, JSON, CeleryCommand,
- CeleryOption)
+ CeleryOption, handle_preload_options)
+@click.command(cls=CeleryCommand)
@click.argument('name')
@click.option('-a',
'--args',
@@ -52,8 +53,8 @@
cls=CeleryOption,
help_group="Routing Options",
help="custom routing key.")
-@click.command(cls=CeleryCommand)
@click.pass_context
+@handle_preload_options
def call(ctx, name, args, kwargs, eta, countdown, expires, serializer, queue, exchange, routing_key):
"""Call a task by name."""
task_id = ctx.obj.app.send_task(
diff --git a/celery/bin/celery.py b/celery/bin/celery.py
index 095766c0f4d..c6b862d0f10 100644
--- a/celery/bin/celery.py
+++ b/celery/bin/celery.py
@@ -145,6 +145,9 @@ def celery(ctx, app, broker, result_backend, loader, config, workdir,
beat.params.extend(ctx.obj.app.user_options.get('beat', []))
events.params.extend(ctx.obj.app.user_options.get('events', []))
+ for command in celery.commands.values():
+ command.params.extend(ctx.obj.app.user_options.get('preload', []))
+
@celery.command(cls=CeleryCommand)
@click.pass_context
diff --git a/celery/bin/control.py b/celery/bin/control.py
index a48de89ce72..3fe8eb76b42 100644
--- a/celery/bin/control.py
+++ b/celery/bin/control.py
@@ -4,7 +4,8 @@
import click
from kombu.utils.json import dumps
-from celery.bin.base import COMMA_SEPARATED_LIST, CeleryCommand, CeleryOption
+from celery.bin.base import (COMMA_SEPARATED_LIST, CeleryCommand,
+ CeleryOption, handle_preload_options)
from celery.platforms import EX_UNAVAILABLE
from celery.utils import text
from celery.worker.control import Panel
@@ -71,6 +72,7 @@ def _compile_arguments(action, args):
help_group='Remote Control Options',
help='Use json as output format.')
@click.pass_context
+@handle_preload_options
def status(ctx, timeout, destination, json, **kwargs):
"""Show list of workers that are online."""
callback = None if json else partial(_say_remote_command_reply, ctx)
@@ -115,6 +117,7 @@ def status(ctx, timeout, destination, json, **kwargs):
help_group='Remote Control Options',
help='Use json as output format.')
@click.pass_context
+@handle_preload_options
def inspect(ctx, action, timeout, destination, json, **kwargs):
"""Inspect the worker at runtime.
@@ -164,6 +167,7 @@ def inspect(ctx, action, timeout, destination, json, **kwargs):
help_group='Remote Control Options',
help='Use json as output format.')
@click.pass_context
+@handle_preload_options
def control(ctx, action, timeout, destination, json):
"""Workers remote control.
diff --git a/celery/bin/events.py b/celery/bin/events.py
index 0e3bd1a8aea..dc535f5b7b7 100644
--- a/celery/bin/events.py
+++ b/celery/bin/events.py
@@ -4,7 +4,8 @@
import click
-from celery.bin.base import LOG_LEVEL, CeleryDaemonCommand, CeleryOption
+from celery.bin.base import (LOG_LEVEL, CeleryDaemonCommand, CeleryOption,
+ handle_preload_options)
from celery.platforms import detached, set_process_title, strargv
@@ -47,6 +48,6 @@ def _run_evtop(app):
    raise click.UsageError("The curses module is required for this command.")
@click.command(cls=CeleryDaemonCommand)
@click.option('-d',
'--dump',
@@ -78,6 +80,7 @@ def _run_evtop(app):
help_group="Snapshot",
help="Logging level.")
@click.pass_context
+@handle_preload_options
def events(ctx, dump, camera, detach, frequency, maxrate, loglevel, **kwargs):
"""Event-stream utilities."""
app = ctx.obj.app
diff --git a/celery/bin/graph.py b/celery/bin/graph.py
index 3013077b4b5..93b01e808fa 100644
--- a/celery/bin/graph.py
+++ b/celery/bin/graph.py
@@ -4,11 +4,12 @@
import click
-from celery.bin.base import CeleryCommand
+from celery.bin.base import CeleryCommand, handle_preload_options
from celery.utils.graph import DependencyGraph, GraphFormatter
@click.group()
+@handle_preload_options
def graph():
"""The ``celery graph`` command."""
diff --git a/celery/bin/list.py b/celery/bin/list.py
index fefc5e73fde..06c4fbf28bf 100644
--- a/celery/bin/list.py
+++ b/celery/bin/list.py
@@ -1,10 +1,11 @@
"""The ``celery list bindings`` command, used to inspect queue bindings."""
import click
-from celery.bin.base import CeleryCommand
+from celery.bin.base import CeleryCommand, handle_preload_options
@click.group(name="list")
+@handle_preload_options
def list_():
"""Get info from broker.
diff --git a/celery/bin/logtool.py b/celery/bin/logtool.py
index 07dbffa8767..83e8064bdb0 100644
--- a/celery/bin/logtool.py
+++ b/celery/bin/logtool.py
@@ -5,7 +5,7 @@
import click
-from celery.bin.base import CeleryCommand
+from celery.bin.base import CeleryCommand, handle_preload_options
__all__ = ('logtool',)
@@ -111,6 +111,7 @@ def report(self):
@click.group()
+@handle_preload_options
def logtool():
"""The ``celery logtool`` command."""
diff --git a/celery/bin/migrate.py b/celery/bin/migrate.py
index c5ba9b33c43..febaaaacab2 100644
--- a/celery/bin/migrate.py
+++ b/celery/bin/migrate.py
@@ -2,7 +2,8 @@
import click
from kombu import Connection
-from celery.bin.base import CeleryCommand, CeleryOption
+from celery.bin.base import (CeleryCommand, CeleryOption,
+ handle_preload_options)
from celery.contrib.migrate import migrate_tasks
@@ -44,6 +45,7 @@
help_group='Migration Options',
help='Continually migrate tasks until killed.')
@click.pass_context
+@handle_preload_options
def migrate(ctx, source, destination, **kwargs):
"""Migrate tasks from one broker to another.
diff --git a/celery/bin/multi.py b/celery/bin/multi.py
index 12bb52b87d2..82a86a6129e 100644
--- a/celery/bin/multi.py
+++ b/celery/bin/multi.py
@@ -108,7 +108,7 @@
from celery import VERSION_BANNER
from celery.apps.multi import Cluster, MultiParser, NamespacedOptionParser
-from celery.bin.base import CeleryCommand
+from celery.bin.base import CeleryCommand, handle_preload_options
from celery.platforms import EX_FAILURE, EX_OK, signals
from celery.utils import term
from celery.utils.text import pluralize
@@ -468,6 +468,7 @@ def DOWN(self):
}
)
@click.pass_context
+@handle_preload_options
def multi(ctx):
"""Start multiple worker instances."""
cmd = MultiTool(quiet=ctx.obj.quiet, no_color=ctx.obj.no_color)
diff --git a/celery/bin/purge.py b/celery/bin/purge.py
index 609a9a0f660..2629ac7eff3 100644
--- a/celery/bin/purge.py
+++ b/celery/bin/purge.py
@@ -1,7 +1,8 @@
"""The ``celery purge`` program, used to delete messages from queues."""
import click
-from celery.bin.base import COMMA_SEPARATED_LIST, CeleryCommand, CeleryOption
+from celery.bin.base import (COMMA_SEPARATED_LIST, CeleryCommand,
+ CeleryOption, handle_preload_options)
from celery.utils import text
@@ -25,6 +26,7 @@
help_group='Purging Options',
help="Comma separated list of queues names not to purge.")
@click.pass_context
+@handle_preload_options
def purge(ctx, force, queues, exclude_queues):
"""Erase all messages from all known task queues.
diff --git a/celery/bin/result.py b/celery/bin/result.py
index d90421c4cde..c126fb588ee 100644
--- a/celery/bin/result.py
+++ b/celery/bin/result.py
@@ -1,7 +1,8 @@
"""The ``celery result`` program, used to inspect task results."""
import click
-from celery.bin.base import CeleryCommand, CeleryOption
+from celery.bin.base import (CeleryCommand, CeleryOption,
+ handle_preload_options)
@click.command(cls=CeleryCommand)
@@ -17,6 +18,7 @@
help_group='Result Options',
help="Show traceback instead.")
@click.pass_context
+@handle_preload_options
def result(ctx, task_id, task, traceback):
"""Print the return value for a given task id."""
app = ctx.obj.app
diff --git a/celery/bin/shell.py b/celery/bin/shell.py
index b3b77e02fdb..378448a24cf 100644
--- a/celery/bin/shell.py
+++ b/celery/bin/shell.py
@@ -6,7 +6,8 @@
import click
-from celery.bin.base import CeleryCommand, CeleryOption
+from celery.bin.base import (CeleryCommand, CeleryOption,
+ handle_preload_options)
def _invoke_fallback_shell(locals):
@@ -114,6 +115,7 @@ def _invoke_default_shell(locals):
help_group="Shell Options",
help="Use gevent.")
@click.pass_context
+@handle_preload_options
def shell(ctx, ipython=False, bpython=False,
python=False, without_tasks=False, eventlet=False,
gevent=False):
diff --git a/celery/bin/upgrade.py b/celery/bin/upgrade.py
index 1518297172c..e083995b674 100644
--- a/celery/bin/upgrade.py
+++ b/celery/bin/upgrade.py
@@ -5,11 +5,13 @@
import click
from celery.app import defaults
-from celery.bin.base import CeleryCommand, CeleryOption
+from celery.bin.base import (CeleryCommand, CeleryOption,
+ handle_preload_options)
from celery.utils.functional import pass1
@click.group()
+@handle_preload_options
def upgrade():
"""Perform upgrade between versions."""
diff --git a/celery/bin/worker.py b/celery/bin/worker.py
index cd826b89b17..ca16a19b4e3 100644
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -9,7 +9,8 @@
from celery import concurrency
from celery.bin.base import (COMMA_SEPARATED_LIST, LOG_LEVEL,
- CeleryDaemonCommand, CeleryOption)
+ CeleryDaemonCommand, CeleryOption,
+ handle_preload_options)
from celery.platforms import (EX_FAILURE, EX_OK, detached,
maybe_drop_privileges)
from celery.utils.log import get_logger
@@ -273,6 +274,7 @@ def detach(path, argv, logfile=None, pidfile=None, uid=None,
cls=CeleryOption,
help_group="Embedded Beat Options")
@click.pass_context
+@handle_preload_options
def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
loglevel=None, logfile=None, pidfile=None, statedb=None,
**kwargs):
diff --git a/docs/userguide/extending.rst b/docs/userguide/extending.rst
index 443255e1789..cf3a9929be8 100644
--- a/docs/userguide/extending.rst
+++ b/docs/userguide/extending.rst
@@ -769,29 +769,22 @@ Preload options
~~~~~~~~~~~~~~~
The :program:`celery` umbrella command supports the concept of 'preload
-options'. These are special options passed to all sub-commands and parsed
-outside of the main parsing step.
+options'. These are special options passed to all sub-commands.
-The list of default preload options can be found in the API reference:
-:mod:`celery.bin.base`.
-
-You can add new preload options too, for example to specify a configuration
+You can add new preload options, for example to specify a configuration
template:
.. code-block:: python
from celery import Celery
from celery import signals
- from celery.bin import Option
+ from click import Option
app = Celery()
- def add_preload_options(parser):
- parser.add_argument(
- '-Z', '--template', default='default',
- help='Configuration template to use.',
- )
- app.user_options['preload'].add(add_preload_options)
+ app.user_options['preload'].add(Option(('-Z', '--template'),
+ default='default',
+ help='Configuration template to use.'))
@signals.user_preload_options.connect
def on_preload_parsed(options, **kwargs):
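Putting the pieces together, a complete (hypothetical) preload-option flow
under the restored mechanism looks roughly like this; ``handle_preload_options``
fires the ``user_preload_options`` signal with the parsed values:

.. code-block:: python

    from celery import Celery, signals
    from click import Option

    app = Celery()
    app.user_options['preload'].add(Option(('-Z', '--template'),
                                           default='default',
                                           help='Configuration template to use.'))

    @signals.user_preload_options.connect
    def on_preload_parsed(options, **kwargs):
        # ``options`` maps option names to their parsed values,
        # e.g. {'template': 'default'}.
        print('using template:', options['template'])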
From 39db90cc83b8a283933fb5a6d1b16b46837d1ced Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 3 Dec 2020 18:19:47 +0200
Subject: [PATCH 138/157] Changelog for 5.0.3.
---
Changelog.rst | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/Changelog.rst b/Changelog.rst
index b65686a6708..407d9e4acc3 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -8,6 +8,22 @@ This document contains change notes for bugfix & new features
in the 5.0.x series, please see :ref:`whatsnew-5.0` for
an overview of what's new in Celery 5.0.
+.. _version-5.0.3:
+
+5.0.3
+=====
+:release-date: 2020-12-03 6.30 P.M UTC+2:00
+:release-by: Omer Katz
+
+- Make `--workdir` eager for early handling (#6457).
+- When using the MongoDB backend, don't clean up if result_expires is 0 or None (#6462).
+- Fix passing queues into purge command (#6469).
+- Restore `app.start()` and `app.worker_main()` (#6481).
+- Detaching no longer creates an extra log file (#6426).
+- Result backend instances are now thread local to ensure thread safety (#6416).
+- Don't upgrade click to 8.x since click-repl doesn't support it yet.
+- Restore preload options (#6516).
+
.. _version-5.0.2:
5.0.2
From a4d942b3156961a8fdd6829121bdc52fc99da30a Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Thu, 3 Dec 2020 18:20:36 +0200
Subject: [PATCH 139/157] =?UTF-8?q?Bump=20version:=205.0.2=20=E2=86=92=205?=
=?UTF-8?q?.0.3?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.bumpversion.cfg | 2 +-
README.rst | 6 +++---
celery/__init__.py | 2 +-
docs/includes/introduction.txt | 2 +-
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 7be80a9bab6..6ea6b829c07 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 5.0.2
+current_version = 5.0.3
commit = True
tag = True
parse = (?P\d+)\.(?P\d+)\.(?P\d+)(?P[a-z\d]+)?
diff --git a/README.rst b/README.rst
index 529669641d9..31c09d27b39 100644
--- a/README.rst
+++ b/README.rst
@@ -2,7 +2,7 @@
|build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge|
-:Version: 5.0.2 (singularity)
+:Version: 5.0.3 (singularity)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
@@ -57,7 +57,7 @@ in such a way that the client enqueues an URL to be requested by a worker.
What do I need?
===============
-Celery version 5.0.2 runs on,
+Celery version 5.0.3 runs on,
- Python (3.6, 3.7, 3.8)
- PyPy3.6 (7.6)
@@ -89,7 +89,7 @@ Get Started
===========
If this is the first time you're trying to use Celery, or you're
-new to Celery 5.0.2 coming from previous versions then you should read our
+new to Celery 5.0.3 coming from previous versions then you should read our
getting started tutorials:
- `First steps with Celery`_
diff --git a/celery/__init__.py b/celery/__init__.py
index 7ed8e28cb0a..b4d9bb899a8 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -17,7 +17,7 @@
SERIES = 'singularity'
-__version__ = '5.0.2'
+__version__ = '5.0.3'
__author__ = 'Ask Solem'
__contact__ = 'auvipy@gmail.com'
__homepage__ = 'http://celeryproject.org'
diff --git a/docs/includes/introduction.txt b/docs/includes/introduction.txt
index a19bd2a012a..70751c92c17 100644
--- a/docs/includes/introduction.txt
+++ b/docs/includes/introduction.txt
@@ -1,4 +1,4 @@
-:Version: 5.0.2 (cliffs)
+:Version: 5.0.3 (cliffs)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
From a192f9cbf546e36b590166426d5e26a90964eeb1 Mon Sep 17 00:00:00 2001
From: Matus Valo
Date: Sun, 6 Dec 2020 11:03:31 +0100
Subject: [PATCH 140/157] Added integration tests for calling a task (#6523)
---
t/integration/tasks.py | 41 ++++++--
t/integration/test_tasks.py | 200 +++++++++++++++++++++++++++++++++++-
2 files changed, 232 insertions(+), 9 deletions(-)
diff --git a/t/integration/tasks.py b/t/integration/tasks.py
index 1aaeed32378..2b4937a3725 100644
--- a/t/integration/tasks.py
+++ b/t/integration/tasks.py
@@ -16,15 +16,18 @@ def identity(x):
@shared_task
-def add(x, y):
- """Add two numbers."""
- return x + y
+def add(x, y, z=None):
+ """Add two or three numbers."""
+ if z:
+ return x + y + z
+ else:
+ return x + y
-@shared_task
-def raise_error(*args):
- """Deliberately raise an error."""
- raise ValueError("deliberate error")
+@shared_task(typing=False)
+def add_not_typed(x, y):
+ """Add two numbers, but don't check arguments"""
+ return x + y
@shared_task(ignore_result=True)
@@ -33,6 +36,12 @@ def add_ignore_result(x, y):
return x + y
+@shared_task
+def raise_error(*args):
+ """Deliberately raise an error."""
+ raise ValueError("deliberate error")
+
+
@shared_task
def chain_add(x, y):
(
@@ -162,6 +171,24 @@ def collect_ids(self, res, i):
return res, (self.request.root_id, self.request.parent_id, i)
+@shared_task(bind=True, default_retry_delay=1)
+def retry(self, return_value=None):
+ """Task simulating multiple retries.
+
+ When return_value is provided, the task after retries returns
+ the result. Otherwise it fails.
+ """
+ if return_value:
+ attempt = getattr(self, 'attempt', 0)
+ print('attempt', attempt)
+ if attempt >= 3:
+ delattr(self, 'attempt')
+ return return_value
+ self.attempt = attempt + 1
+
+ raise self.retry(exc=ExpectedException(), countdown=5)
+
+
@shared_task(bind=True, expires=60.0, max_retries=1)
def retry_once(self, *args, expires=60.0, max_retries=1, countdown=0.1):
"""Task that fails and is retried. Returns the number of retries."""
diff --git a/t/integration/test_tasks.py b/t/integration/test_tasks.py
index edfda576f5b..ca71196a283 100644
--- a/t/integration/test_tasks.py
+++ b/t/integration/test_tasks.py
@@ -1,10 +1,14 @@
+from datetime import datetime, timedelta
+from time import sleep, perf_counter
+
import pytest
+import celery
from celery import group
from .conftest import get_active_redis_channels
-from .tasks import (ClassBasedAutoRetryTask, add, add_ignore_result,
- print_unicode, retry_once, retry_once_priority, sleeping)
+from .tasks import (ClassBasedAutoRetryTask, add, add_ignore_result, add_not_typed, retry,
+ print_unicode, retry_once, retry_once_priority, sleeping, fail, ExpectedException)
TIMEOUT = 10
@@ -28,8 +32,200 @@ def test_class_based_task_retried(self, celery_session_app,
assert res.get(timeout=TIMEOUT) == 1
+def _producer(j):
+ """Single producer helper function"""
+ results = []
+ for i in range(20):
+ results.append([i + j, add.delay(i, j)])
+ for expected, result in results:
+ value = result.get(timeout=10)
+ assert value == expected
+ assert result.status == 'SUCCESS'
+ assert result.ready() is True
+ assert result.successful() is True
+ return j
+
+
class test_tasks:
+ def test_simple_call(self):
+ """Tests direct simple call of task"""
+ assert add(1, 1) == 2
+ assert add(1, 1, z=1) == 3
+
+ @flaky
+ def test_basic_task(self, manager):
+ """Tests basic task call"""
+ results = []
+ # Tests calling task only with args
+ for i in range(10):
+ results.append([i + i, add.delay(i, i)])
+ for expected, result in results:
+ value = result.get(timeout=10)
+ assert value == expected
+ assert result.status == 'SUCCESS'
+ assert result.ready() is True
+ assert result.successful() is True
+
+ results = []
+ # Tests calling task with args and kwargs
+ for i in range(10):
+ results.append([3*i, add.delay(i, i, z=i)])
+ for expected, result in results:
+ value = result.get(timeout=10)
+ assert value == expected
+ assert result.status == 'SUCCESS'
+ assert result.ready() is True
+ assert result.successful() is True
+
+ @flaky
+ def test_multiprocess_producer(self, manager):
+ """Testing multiple processes calling tasks."""
+ from multiprocessing import Pool
+ pool = Pool(20)
+ ret = pool.map(_producer, range(120))
+ assert list(ret) == list(range(120))
+
+ @flaky
+ def test_multithread_producer(self, manager):
+ """Testing multiple threads calling tasks."""
+ from multiprocessing.pool import ThreadPool
+ pool = ThreadPool(20)
+ ret = pool.map(_producer, range(120))
+ assert list(ret) == list(range(120))
+
+ @flaky
+ def test_ignore_result(self, manager):
+ """Testing calling task with ignoring results."""
+ result = add.apply_async((1, 2), ignore_result=True)
+ assert result.get() is None
+
+ @flaky
+ def test_timeout(self, manager):
+ """Testing timeout of getting results from tasks."""
+ result = sleeping.delay(10)
+ with pytest.raises(celery.exceptions.TimeoutError):
+ result.get(timeout=5)
+
+ @flaky
+ def test_expired(self, manager):
+ """Testing expiration of task."""
+ # Fill the queue with tasks which took > 1 sec to process
+ for _ in range(4):
+ sleeping.delay(2)
+ # Execute task with expiration = 1 sec
+ result = add.apply_async((1, 1), expires=1)
+ with pytest.raises(celery.exceptions.TaskRevokedError):
+ result.get()
+ assert result.status == 'REVOKED'
+ assert result.ready() is True
+ assert result.failed() is False
+ assert result.successful() is False
+
+ # Fill the queue with tasks which took > 1 sec to process
+ for _ in range(4):
+ sleeping.delay(2)
+ # Execute task with expiration at now + 1 sec
+ result = add.apply_async((1, 1), expires=datetime.utcnow() + timedelta(seconds=1))
+ with pytest.raises(celery.exceptions.TaskRevokedError):
+ result.get()
+ assert result.status == 'REVOKED'
+ assert result.ready() is True
+ assert result.failed() is False
+ assert result.successful() is False
+
+ @flaky
+ def test_eta(self, manager):
+ """Tests tasks scheduled at some point in future."""
+ start = perf_counter()
+ # Schedule task to be executed in 3 seconds
+ result = add.apply_async((1, 1), countdown=3)
+ sleep(1)
+ assert result.status == 'PENDING'
+ assert result.ready() is False
+ assert result.get() == 2
+ end = perf_counter()
+ assert result.status == 'SUCCESS'
+ assert result.ready() is True
+        # Difference between calling the task and getting the result must be bigger than 3 secs
+ assert (end - start) > 3
+
+ start = perf_counter()
+ # Schedule task to be executed at time now + 3 seconds
+ result = add.apply_async((2, 2), eta=datetime.utcnow() + timedelta(seconds=3))
+ sleep(1)
+ assert result.status == 'PENDING'
+ assert result.ready() is False
+ assert result.get() == 4
+ end = perf_counter()
+ assert result.status == 'SUCCESS'
+ assert result.ready() is True
+        # Difference between calling the task and getting the result must be bigger than 3 secs
+ assert (end - start) > 3
+
+ @flaky
+ def test_fail(self, manager):
+ """Tests that the failing task propagates back correct exception."""
+ result = fail.delay()
+ with pytest.raises(ExpectedException):
+ result.get(timeout=5)
+ assert result.status == 'FAILURE'
+ assert result.ready() is True
+ assert result.failed() is True
+ assert result.successful() is False
+
+ @flaky
+ def test_wrong_arguments(self, manager):
+ """Tests that proper exceptions are raised when task is called with wrong arguments."""
+ with pytest.raises(TypeError):
+ add(5)
+
+ with pytest.raises(TypeError):
+ add(5, 5, wrong_arg=5)
+
+ with pytest.raises(TypeError):
+ add.delay(5)
+
+ with pytest.raises(TypeError):
+ add.delay(5, wrong_arg=5)
+
+ # Tasks with typing=False are not checked but execution should fail
+ result = add_not_typed.delay(5)
+ with pytest.raises(TypeError):
+ result.get(timeout=5)
+ assert result.status == 'FAILURE'
+
+ result = add_not_typed.delay(5, wrong_arg=5)
+ with pytest.raises(TypeError):
+ result.get(timeout=5)
+ assert result.status == 'FAILURE'
+
+ @flaky
+ def test_retry(self, manager):
+ """Tests retrying of task."""
+ # Tests when max. retries is reached
+ result = retry.delay()
+ for _ in range(5):
+ status = result.status
+ if status != 'PENDING':
+ break
+ sleep(1)
+ assert status == 'RETRY'
+ with pytest.raises(ExpectedException):
+ result.get()
+ assert result.status == 'FAILURE'
+
+ # Tests when task is retried but after returns correct result
+ result = retry.delay(return_value='bar')
+ for _ in range(5):
+ status = result.status
+ if status != 'PENDING':
+ break
+ sleep(1)
+ assert status == 'RETRY'
+ assert result.get() == 'bar'
+ assert result.status == 'SUCCESS'
+
@flaky
def test_task_accepted(self, manager, sleep=1):
r1 = sleeping.delay(sleep)
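For reference, the integration tests above assume task definitions roughly like the following sketch. This is a hedged reconstruction, not the actual t/integration/tasks.py; the shapes of `add_not_typed`, `retry` and `ExpectedException` are inferred from how the tests use them.

    from time import sleep as _sleep

    from celery import shared_task


    class ExpectedException(Exception):
        """Raised deliberately by the failing and retrying tasks."""


    @shared_task
    def add(x, y):
        return x + y


    @shared_task(typing=False)
    def add_not_typed(x, y):
        # typing=False disables call-time argument checking, so wrong
        # arguments only blow up on the worker (see test_wrong_arguments).
        return x + y


    @shared_task
    def sleeping(seconds):
        _sleep(seconds)


    @shared_task
    def fail():
        raise ExpectedException('failing on purpose')


    @shared_task(bind=True, default_retry_delay=1)
    def retry(self, return_value=None):
        if return_value is not None and self.request.retries:
            return return_value
        raise self.retry(exc=ExpectedException('retrying'), max_retries=2)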
From c3e041050ae252be79d9b4ae400ec0c5b2831d14 Mon Sep 17 00:00:00 2001
From: Matus Valo
Date: Mon, 7 Dec 2020 12:27:59 +0100
Subject: [PATCH 141/157] DummyClient of cache+memory:// backend now shares
state between threads (#6524)
---
celery/backends/cache.py | 6 +++++-
t/unit/backends/test_cache.py | 10 ++++++++++
2 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/celery/backends/cache.py b/celery/backends/cache.py
index 01ac1ac3e5f..e340f31b7f6 100644
--- a/celery/backends/cache.py
+++ b/celery/backends/cache.py
@@ -20,6 +20,10 @@
Please use one of the following backends instead: {1}\
"""
+# Global in-memory cache shared by all DummyClient instances,
+# so that cache state is shared between threads
+_DUMMY_CLIENT_CACHE = LRUCache(limit=5000)
+
def import_best_memcache():
if _imp[0] is None:
@@ -53,7 +57,7 @@ def Client(*args, **kwargs): # noqa
class DummyClient:
def __init__(self, *args, **kwargs):
- self.cache = LRUCache(limit=5000)
+ self.cache = _DUMMY_CLIENT_CACHE
def get(self, key, *args, **kwargs):
return self.cache.get(key)
diff --git a/t/unit/backends/test_cache.py b/t/unit/backends/test_cache.py
index 6bd23d9d3d2..8400729017d 100644
--- a/t/unit/backends/test_cache.py
+++ b/t/unit/backends/test_cache.py
@@ -35,6 +35,16 @@ def test_no_backend(self):
with pytest.raises(ImproperlyConfigured):
CacheBackend(backend=None, app=self.app)
+ def test_memory_client_is_shared(self):
+ """This test verifies that memory:// backend state is shared over multiple threads"""
+ from threading import Thread
+ t = Thread(
+ target=lambda: CacheBackend(backend='memory://', app=self.app).set('test', 12345)
+ )
+ t.start()
+ t.join()
+ assert self.tb.client.get('test') == 12345
+
def test_mark_as_done(self):
assert self.tb.get_state(self.tid) == states.PENDING
assert self.tb.get_result(self.tid) is None
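The effect of the module-level cache is easiest to see outside the test: every DummyClient instance now reads and writes the same LRUCache, so a value stored from one thread is visible to clients created in another. A minimal sketch against celery.backends.cache:

    from threading import Thread

    from celery.backends.cache import DummyClient

    a, b = DummyClient(), DummyClient()

    t = Thread(target=lambda: a.set('key', b'value'))
    t.start()
    t.join()

    # Before this patch each DummyClient owned a private LRUCache and this
    # lookup returned None; both instances now share _DUMMY_CLIENT_CACHE.
    assert b.get('key') == b'value'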
From f9ccba9160705ae18742a0923b6e574a2fcee097 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Tue, 8 Dec 2020 14:38:59 +0200
Subject: [PATCH 142/157] isort.
---
t/integration/test_tasks.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/t/integration/test_tasks.py b/t/integration/test_tasks.py
index ca71196a283..ba5f4fbba77 100644
--- a/t/integration/test_tasks.py
+++ b/t/integration/test_tasks.py
@@ -1,5 +1,5 @@
from datetime import datetime, timedelta
-from time import sleep, perf_counter
+from time import perf_counter, sleep
import pytest
@@ -7,8 +7,9 @@
from celery import group
from .conftest import get_active_redis_channels
-from .tasks import (ClassBasedAutoRetryTask, add, add_ignore_result, add_not_typed, retry,
- print_unicode, retry_once, retry_once_priority, sleeping, fail, ExpectedException)
+from .tasks import (ClassBasedAutoRetryTask, ExpectedException, add,
+ add_ignore_result, add_not_typed, fail, print_unicode,
+ retry, retry_once, retry_once_priority, sleeping)
TIMEOUT = 10
From 0674684dfd6cc29d3b5dbb6d2073895e12bfd2c9 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Tue, 8 Dec 2020 14:40:15 +0200
Subject: [PATCH 143/157] Update changelog.
---
Changelog.rst | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/Changelog.rst b/Changelog.rst
index 407d9e4acc3..ba46d1d59ba 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -8,6 +8,20 @@ This document contains change notes for bugfix & new features
in the 5.0.x series, please see :ref:`whatsnew-5.0` for
an overview of what's new in Celery 5.0.
+.. _version-5.0.4:
+
+5.0.4
+=====
+:release-date: 2020-12-08 2.40 P.M UTC+2:00
+:release-by: Omer Katz
+
+- DummyClient of cache+memory:// backend now shares state between threads (#6524).
+
+ This fixes a problem when using our pytest integration with the in-memory
+ result backend.
+ Because the state wasn't shared between threads, #6416 caused test suites
+ to hang on `result.get()`.
+
.. _version-5.0.3:
5.0.3
From 3bb2d58620c5e83ad7cdc18cdfe917dccde74088 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Tue, 8 Dec 2020 14:40:25 +0200
Subject: [PATCH 144/157] =?UTF-8?q?Bump=20version:=205.0.3=20=E2=86=92=205?=
=?UTF-8?q?.0.4?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.bumpversion.cfg | 2 +-
README.rst | 6 +++---
celery/__init__.py | 2 +-
docs/includes/introduction.txt | 2 +-
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 6ea6b829c07..14682ce6b9a 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 5.0.3
+current_version = 5.0.4
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?P<releaselevel>[a-z\d]+)?
diff --git a/README.rst b/README.rst
index 31c09d27b39..22a9fc115bd 100644
--- a/README.rst
+++ b/README.rst
@@ -2,7 +2,7 @@
|build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge|
-:Version: 5.0.3 (singularity)
+:Version: 5.0.4 (singularity)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
@@ -57,7 +57,7 @@ in such a way that the client enqueues an URL to be requested by a worker.
What do I need?
===============
-Celery version 5.0.3 runs on,
+Celery version 5.0.4 runs on,
- Python (3.6, 3.7, 3.8)
- PyPy3.6 (7.6)
@@ -89,7 +89,7 @@ Get Started
===========
If this is the first time you're trying to use Celery, or you're
-new to Celery 5.0.3 coming from previous versions then you should read our
+new to Celery 5.0.4 coming from previous versions then you should read our
getting started tutorials:
- `First steps with Celery`_
diff --git a/celery/__init__.py b/celery/__init__.py
index b4d9bb899a8..c0feb1712db 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -17,7 +17,7 @@
SERIES = 'singularity'
-__version__ = '5.0.3'
+__version__ = '5.0.4'
__author__ = 'Ask Solem'
__contact__ = 'auvipy@gmail.com'
__homepage__ = 'http://celeryproject.org'
diff --git a/docs/includes/introduction.txt b/docs/includes/introduction.txt
index 70751c92c17..ec37039072f 100644
--- a/docs/includes/introduction.txt
+++ b/docs/includes/introduction.txt
@@ -1,4 +1,4 @@
-:Version: 5.0.3 (cliffs)
+:Version: 5.0.4 (cliffs)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
From 420e3931a63538bd225ef57916deccf53cbcb57a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Franti=C5=A1ek=20Zatloukal?=
Date: Tue, 8 Dec 2020 17:18:29 +0100
Subject: [PATCH 145/157] Change deprecated from collections import
Mapping/MutableMapping to from collections.abc ... (#6532)
---
t/unit/utils/test_collections.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/t/unit/utils/test_collections.py b/t/unit/utils/test_collections.py
index 1830c7ce7cd..20005288cee 100644
--- a/t/unit/utils/test_collections.py
+++ b/t/unit/utils/test_collections.py
@@ -1,5 +1,5 @@
import pickle
-from collections import Mapping
+from collections.abc import Mapping
from itertools import count
from time import monotonic
@@ -129,11 +129,11 @@ def test_len(self):
assert len(self.view) == 2
def test_isa_mapping(self):
- from collections import Mapping
+ from collections.abc import Mapping
assert issubclass(ConfigurationView, Mapping)
def test_isa_mutable_mapping(self):
- from collections import MutableMapping
+ from collections.abc import MutableMapping
assert issubclass(ConfigurationView, MutableMapping)
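The move to collections.abc is required going forward: the aliases in the collections package were deprecated since Python 3.3 and removed entirely in Python 3.10.

    from collections.abc import Mapping, MutableMapping  # works on all supported versions

    # from collections import Mapping  # ImportError on Python 3.10+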
From 5fa063afce60f904120cba7f8a4ac5ee0e722b15 Mon Sep 17 00:00:00 2001
From: elonzh
Date: Thu, 10 Dec 2020 00:05:34 +0800
Subject: [PATCH 146/157] fix #6047
---
docs/django/first-steps-with-django.rst | 2 +-
docs/userguide/configuration.rst | 6 +++++-
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/docs/django/first-steps-with-django.rst b/docs/django/first-steps-with-django.rst
index 55d64c990eb..f3a20b18a48 100644
--- a/docs/django/first-steps-with-django.rst
+++ b/docs/django/first-steps-with-django.rst
@@ -210,7 +210,7 @@ To use this with your project you need to follow these steps:
.. code-block:: python
- CELERY_CACHE_BACKEND = 'django-cache'
+ CELERY_RESULT_BACKEND = 'django-cache'
We can also use the cache defined in the CACHES setting in django.
diff --git a/docs/userguide/configuration.rst b/docs/userguide/configuration.rst
index e9c1c76c151..7142cd6ac16 100644
--- a/docs/userguide/configuration.rst
+++ b/docs/userguide/configuration.rst
@@ -1028,9 +1028,13 @@ setting:
``cache_backend``
~~~~~~~~~~~~~~~~~
-This setting is no longer used as it's now possible to specify
+This setting is no longer used by Celery's builtin backends, as it's now possible to specify
the cache backend directly in the :setting:`result_backend` setting.
+.. note::
+
+ The :ref:`django-celery-results` library uses ``cache_backend`` for choosing Django caches.
+
.. _conf-mongodb-result-backend:
MongoDB backend settings
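Put together, the corrected Django configuration looks roughly like this (a sketch with illustrative cache names; the ``cache_backend``/``CELERY_CACHE_BACKEND`` knob is consumed by django-celery-results, not by Celery's builtin backends):

    # settings.py (sketch)
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
        'celery': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    }

    CELERY_RESULT_BACKEND = 'django-cache'  # not CELERY_CACHE_BACKEND
    CELERY_CACHE_BACKEND = 'celery'         # which Django cache to store results in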
From 8bceb446e6a07682d4b8dd6199cdac450bd63578 Mon Sep 17 00:00:00 2001
From: Sven Koitka
Date: Thu, 10 Dec 2020 12:42:32 +0100
Subject: [PATCH 147/157] Fix type error in S3 backend (#6537)
* Convert key from bytes to str
* Add unit test for S3 delete of key with type bytes
---
celery/backends/s3.py | 1 +
t/unit/backends/test_s3.py | 11 ++++++-----
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/celery/backends/s3.py b/celery/backends/s3.py
index c102073ccca..ea04ae373d1 100644
--- a/celery/backends/s3.py
+++ b/celery/backends/s3.py
@@ -72,6 +72,7 @@ def set(self, key, value):
s3_object.put(Body=value)
def delete(self, key):
+ key = bytes_to_str(key)
s3_object = self._get_s3_object(key)
s3_object.delete()
diff --git a/t/unit/backends/test_s3.py b/t/unit/backends/test_s3.py
index 5733bb6fca4..fdea04b32cc 100644
--- a/t/unit/backends/test_s3.py
+++ b/t/unit/backends/test_s3.py
@@ -140,8 +140,9 @@ def test_with_error_while_getting_key(self, mock_boto3):
with pytest.raises(ClientError):
s3_backend.get('uuidddd')
+ @pytest.mark.parametrize("key", ['uuid', b'uuid'])
@mock_s3
- def test_delete_a_key(self):
+ def test_delete_a_key(self, key):
self._mock_s3_resource()
self.app.conf.s3_access_key_id = 'somekeyid'
@@ -149,12 +150,12 @@ def test_delete_a_key(self):
self.app.conf.s3_bucket = 'bucket'
s3_backend = S3Backend(app=self.app)
- s3_backend._set_with_state('uuid', 'another_status', states.SUCCESS)
- assert s3_backend.get('uuid') == 'another_status'
+ s3_backend._set_with_state(key, 'another_status', states.SUCCESS)
+ assert s3_backend.get(key) == 'another_status'
- s3_backend.delete('uuid')
+ s3_backend.delete(key)
- assert s3_backend.get('uuid') is None
+ assert s3_backend.get(key) is None
@mock_s3
def test_with_a_non_existing_bucket(self):
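The underlying problem: boto3 object keys must be str, while Celery's key-value store machinery may hand the backend byte keys such as b'celery-task-meta-<uuid>'. The fix normalizes the key on delete, mirroring the conversion the other access paths use:

    from kombu.utils.encoding import bytes_to_str

    key = b'celery-task-meta-1234'
    assert bytes_to_str(key) == 'celery-task-meta-1234'  # str input passes through unchanged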
From 7d59e50d87d260c9459cbc890e3bce0592dd5f99 Mon Sep 17 00:00:00 2001
From: Arnon Yaari
Date: Tue, 15 Dec 2020 16:29:11 +0200
Subject: [PATCH 148/157] events.py: Remove duplicate decorator in wrong place
(#6543)
`@handle_preload_options` was specified twice as a decorator of `events`: once at the top (wrong) and once at the bottom (right).
This fixes the `celery events` command and also `celery --help`.
---
celery/bin/events.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/celery/bin/events.py b/celery/bin/events.py
index dc535f5b7b7..26b67374aad 100644
--- a/celery/bin/events.py
+++ b/celery/bin/events.py
@@ -48,7 +48,6 @@ def _run_evtop(app):
raise click.UsageError("The curses module is required for this command.")
-@handle_preload_options
@click.command(cls=CeleryDaemonCommand)
@click.option('-d',
'--dump',
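Decorator order matters here because @click.command() replaces the function with a click.Command; a decorator stacked above it receives the Command object rather than the callback, which is what broke --help. A simplified illustration (stand-in decorator, not Celery's actual implementation):

    import click


    def handle_preload_options(f):
        # Stand-in: expects to wrap the plain callback.
        def wrapper(*args, **kwargs):
            return f(*args, **kwargs)
        return wrapper


    @click.command()
    @handle_preload_options   # correct: below @click.command(), wrapping the callback
    def events():
        """..."""

    # Stacking @handle_preload_options *above* @click.command() would wrap
    # the Command object itself and confuse click's option parsing.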
From 8aa4eb8e7a2c5874f007b40604193b56871f5368 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 16 Dec 2020 17:32:36 +0200
Subject: [PATCH 149/157] Update changelog.
---
Changelog.rst | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/Changelog.rst b/Changelog.rst
index ba46d1d59ba..0bdb9947f8c 100644
--- a/Changelog.rst
+++ b/Changelog.rst
@@ -8,6 +8,16 @@ This document contains change notes for bugfix & new features
in the 5.0.x series, please see :ref:`whatsnew-5.0` for
an overview of what's new in Celery 5.0.
+.. _version-5.0.5:
+
+5.0.5
+=====
+:release-date: 2020-12-16 5.35 P.M UTC+2:00
+:release-by: Omer Katz
+
+- Ensure keys are strings when deleting results from S3 (#6537).
+- Fix a regression breaking `celery --help` and `celery events` (#6543).
+
.. _version-5.0.4:
5.0.4
From 8492b75c579564c2af5c2be75fe4b2118ebd0cd1 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 16 Dec 2020 17:33:33 +0200
Subject: [PATCH 150/157] =?UTF-8?q?Bump=20version:=205.0.4=20=E2=86=92=205?=
=?UTF-8?q?.0.5?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.bumpversion.cfg | 2 +-
README.rst | 6 +++---
celery/__init__.py | 2 +-
docs/includes/introduction.txt | 2 +-
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/.bumpversion.cfg b/.bumpversion.cfg
index 14682ce6b9a..0ce811df412 100644
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 5.0.4
+current_version = 5.0.5
commit = True
tag = True
parse = (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?P<releaselevel>[a-z\d]+)?
diff --git a/README.rst b/README.rst
index 22a9fc115bd..e1cdae5ee0e 100644
--- a/README.rst
+++ b/README.rst
@@ -2,7 +2,7 @@
|build-status| |coverage| |license| |wheel| |pyversion| |pyimp| |ocbackerbadge| |ocsponsorbadge|
-:Version: 5.0.4 (singularity)
+:Version: 5.0.5 (singularity)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
@@ -57,7 +57,7 @@ in such a way that the client enqueues an URL to be requested by a worker.
What do I need?
===============
-Celery version 5.0.4 runs on,
+Celery version 5.0.5 runs on,
- Python (3.6, 3.7, 3.8)
- PyPy3.6 (7.6)
@@ -89,7 +89,7 @@ Get Started
===========
If this is the first time you're trying to use Celery, or you're
-new to Celery 5.0.4 coming from previous versions then you should read our
+new to Celery 5.0.5 coming from previous versions then you should read our
getting started tutorials:
- `First steps with Celery`_
diff --git a/celery/__init__.py b/celery/__init__.py
index c0feb1712db..ae3388c0e56 100644
--- a/celery/__init__.py
+++ b/celery/__init__.py
@@ -17,7 +17,7 @@
SERIES = 'singularity'
-__version__ = '5.0.4'
+__version__ = '5.0.5'
__author__ = 'Ask Solem'
__contact__ = 'auvipy@gmail.com'
__homepage__ = 'http://celeryproject.org'
diff --git a/docs/includes/introduction.txt b/docs/includes/introduction.txt
index ec37039072f..11a99ec278b 100644
--- a/docs/includes/introduction.txt
+++ b/docs/includes/introduction.txt
@@ -1,4 +1,4 @@
-:Version: 5.0.4 (cliffs)
+:Version: 5.0.5 (cliffs)
:Web: http://celeryproject.org/
:Download: https://pypi.org/project/celery/
:Source: https://github.com/celery/celery/
From 4adb020e3bbbbde9e222944c10ce6d8f8bb440a1 Mon Sep 17 00:00:00 2001
From: Chris Lee
Date: Fri, 18 Sep 2020 03:38:39 -0400
Subject: [PATCH 151/157] ADD: indico additions - trails
---
.vscode/settings.json | 3 +++
celery/result.py | 24 +++++++++++++++++++++++-
celery/states.py | 5 +++--
3 files changed, 29 insertions(+), 3 deletions(-)
create mode 100644 .vscode/settings.json
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 00000000000..d3def91314f
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+ "editor.formatOnSave": false
+}
diff --git a/celery/result.py b/celery/result.py
index 0c10d58e86c..73340455646 100644
--- a/celery/result.py
+++ b/celery/result.py
@@ -554,6 +554,14 @@ def _on_ready(self):
if self.backend.is_async:
self.on_ready()
+ def collect(self, **kwargs):
+ for task in self.results:
+ task_results = list(task.collect(**kwargs))
+ if isinstance(task, ResultSet):
+ yield task_results
+ else:
+ yield task_results[-1]
+
def remove(self, result):
"""Remove result from the set; it must be a member.
@@ -666,7 +674,8 @@ def __getitem__(self, index):
def get(self, timeout=None, propagate=True, interval=0.5,
callback=None, no_ack=True, on_message=None,
- disable_sync_subtasks=True, on_interval=None):
+ disable_sync_subtasks=True, on_interval=None, **kwargs):
+ # PATCH: added kwargs for a more generalized interface
"""See :meth:`join`.
This is here for API compatibility with :class:`AsyncResult`,
@@ -949,6 +958,14 @@ def as_tuple(self):
def children(self):
return self.results
+ @property
+ def state(self):
+ # Report the first exceptional or unready child state,
+ # defaulting to SUCCESS when there are no children.
+ state = states.SUCCESS
+ for child in self.children:
+ state = child.state
+ if (state in states.EXCEPTION_STATES
+ or state in states.UNREADY_STATES):
+ break
+ return state
+
@classmethod
def restore(cls, id, backend=None, app=None):
"""Restore previously saved group result."""
@@ -1065,3 +1082,8 @@ def result_from_tuple(r, app=None):
return Result(id, parent=parent)
return r
+
+
+def get_exception_in_callback(task_id: str) -> Exception:
+ with allow_join_result():
+ return AsyncResult(task_id).get(propagate=False)
\ No newline at end of file
diff --git a/celery/states.py b/celery/states.py
index e807ed4822c..375ca72b9f5 100644
--- a/celery/states.py
+++ b/celery/states.py
@@ -140,12 +140,13 @@ def __le__(self, other):
#: Task is waiting for retry.
RETRY = 'RETRY'
IGNORED = 'IGNORED'
+TRAILED = 'TRAILED'
READY_STATES = frozenset({SUCCESS, FAILURE, REVOKED})
-UNREADY_STATES = frozenset({PENDING, RECEIVED, STARTED, REJECTED, RETRY})
+UNREADY_STATES = frozenset({PENDING, RECEIVED, STARTED, REJECTED, RETRY, TRAILED})
EXCEPTION_STATES = frozenset({RETRY, FAILURE, REVOKED})
PROPAGATE_STATES = frozenset({FAILURE, REVOKED})
ALL_STATES = frozenset({
- PENDING, RECEIVED, STARTED, SUCCESS, FAILURE, RETRY, REVOKED,
+ PENDING, RECEIVED, STARTED, SUCCESS, FAILURE, RETRY, REVOKED, TRAILED
})
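This patch and the remainder of the series below appear to be fork-specific additions ("indico"), not upstream Celery. The new aggregate state property scans the children and reports the first exceptional or unready state it finds; a rough usage sketch, reusing the integration tasks from earlier:

    from celery import group

    res = group(add.s(2, 2), sleeping.s(30)).apply_async()

    # A still-running child is reported instead of an overall 'SUCCESS':
    print(res.state)  # e.g. 'STARTED' or 'PENDING'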
From b92b6473bc3731935827435d0f784dcc5c0717d1 Mon Sep 17 00:00:00 2001
From: Chris Lee
Date: Fri, 2 Oct 2020 16:48:57 -0400
Subject: [PATCH 152/157] FIX: remove dev.txt dependencies
---
requirements/dev.txt | 5 -----
1 file changed, 5 deletions(-)
delete mode 100644 requirements/dev.txt
diff --git a/requirements/dev.txt b/requirements/dev.txt
deleted file mode 100644
index 9712c15a2e3..00000000000
--- a/requirements/dev.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-pytz>dev
-git+https://github.com/celery/kombu.git
-git+https://github.com/celery/py-amqp.git
-git+https://github.com/celery/billiard.git
-vine==1.3.0
\ No newline at end of file
From bfff3e30b9333d12973e69983778d59c953e390a Mon Sep 17 00:00:00 2001
From: Chris Lee
Date: Wed, 7 Oct 2020 00:43:11 -0400
Subject: [PATCH 153/157] ADD: handle revoke failures
---
celery/app/trace.py | 1 -
celery/backends/base.py | 9 ++++-----
celery/backends/redis.py | 16 +++++++++++++++-
celery/canvas.py | 3 +--
4 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/celery/app/trace.py b/celery/app/trace.py
index f9b8c83e6e6..3b004d2d75d 100644
--- a/celery/app/trace.py
+++ b/celery/app/trace.py
@@ -208,7 +208,6 @@ def handle_failure(self, task, req, store_errors=True, call_errbacks=True):
einfo = ExceptionInfo()
einfo.exception = get_pickleable_exception(einfo.exception)
einfo.type = get_pickleable_etype(einfo.type)
-
task.backend.mark_as_failure(
req.id, exc, einfo.traceback,
request=req, store_result=store_errors,
diff --git a/celery/backends/base.py b/celery/backends/base.py
index 1aac2a0fc95..c57897005f8 100644
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -218,11 +218,10 @@ def _call_task_errbacks(self, request, exc, traceback):
def mark_as_revoked(self, task_id, reason='',
request=None, store_result=True, state=states.REVOKED):
exc = TaskRevokedError(reason)
- if store_result:
- self.store_result(task_id, exc, state,
- traceback=None, request=request)
- if request and request.chord:
- self.on_chord_part_return(request, state, exc)
+
+ return self.mark_as_failure(
+ task_id, exc, request=request, store_result=store_result, state=state
+ )
def mark_as_retry(self, task_id, exc, traceback=None,
request=None, store_result=True, state=states.RETRY):
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
index dd3677f569c..cce2dc92c2a 100644
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -1,4 +1,5 @@
"""Redis result store backend."""
import time
+import uuid
from contextlib import contextmanager
from functools import partial
@@ -12,7 +13,7 @@
from celery import states
from celery._state import task_join_will_block
from celery.canvas import maybe_signature
-from celery.exceptions import ChordError, ImproperlyConfigured
+from celery.exceptions import ChordError, ImproperlyConfigured, TaskRevokedError
from celery.result import GroupResult, allow_join_result
from celery.utils.functional import dictfilter
from celery.utils.log import get_logger
@@ -397,6 +398,10 @@ def _unpack_chord_result(self, tup, decode,
_, tid, state, retval = decode(tup)
if state in EXCEPTION_STATES:
retval = self.exception_to_python(retval)
+
+ if isinstance(retval, TaskRevokedError):
+ raise retval
+
if state in PROPAGATE_STATES:
raise ChordError(f'Dependency {tid} raised {retval!r}')
return retval
@@ -484,6 +489,15 @@ def on_chord_part_return(self, request, state, result,
resl = [unpack(tup, decode) for tup in resl]
try:
callback.delay(resl)
+ except TaskRevokedError as exc:
+ logger.exception(
+ 'Group %r task was revoked: %r', request.group, exc)
+ if callback.id is None:
+ callback.id = str(uuid.uuid4())
+ return self.chord_error_from_stack(
+ callback,
+ exc
+ )
except Exception as exc: # pylint: disable=broad-except
logger.exception(
'Chord callback for %r raised: %r', request.group, exc)
diff --git a/celery/canvas.py b/celery/canvas.py
index a4de76428dc..6536ef5ab28 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -1088,8 +1088,7 @@ def apply_async(self, args=None, kwargs=None, add_to_parent=True,
if link is not None:
raise TypeError('Cannot add link to group: use a chord')
if link_error is not None:
- raise TypeError(
- 'Cannot add link to group: do that on individual tasks')
+ link_error = None
app = self.app
if app.conf.task_always_eager:
return self.apply(args, kwargs, **options)
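Two behavioural changes ride along in this patch: mark_as_revoked now funnels through mark_as_failure, so error callbacks and chord accounting also run for revoked tasks, and a revoked task in a chord header surfaces as TaskRevokedError rather than a generic ChordError. A hedged sketch of the observable effect (`collect` is a hypothetical callback task):

    from celery import chord
    from celery.exceptions import TaskRevokedError

    result = chord([sleeping.s(30), sleeping.s(30)])(collect.s())

    result.parent.revoke(terminate=True)  # revoke the header group
    try:
        result.get(timeout=10)
    except TaskRevokedError:
        pass  # revocation now propagates instead of hanging or raising ChordError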
From 89a2c330767a552f2871842b0fabb782afd4c0e9 Mon Sep 17 00:00:00 2001
From: Chris Lee
Date: Thu, 8 Oct 2020 10:37:17 -0400
Subject: [PATCH 154/157] ADD: trailer_request support and better drain
resolution
---
celery/app/amqp.py | 5 ++++-
celery/app/base.py | 4 ++--
celery/app/task.py | 4 ++++
celery/backends/asynchronous.py | 12 ++++++++----
celery/backends/base.py | 16 +++++++++++++++-
celery/backends/redis.py | 4 +++-
celery/canvas.py | 20 ++++++++++++--------
celery/worker/request.py | 7 ++++++-
8 files changed, 54 insertions(+), 18 deletions(-)
diff --git a/celery/app/amqp.py b/celery/app/amqp.py
index 1a0454e9a92..10d99cb9079 100644
--- a/celery/app/amqp.py
+++ b/celery/app/amqp.py
@@ -279,7 +279,7 @@ def TaskConsumer(self, channel, queues=None, accept=None, **kw):
def as_task_v2(self, task_id, name, args=None, kwargs=None,
countdown=None, eta=None, group_id=None, group_index=None,
- expires=None, retries=0, chord=None,
+ trailer_request=None, expires=None, retries=0, chord=None,
callbacks=None, errbacks=None, reply_to=None,
time_limit=None, soft_time_limit=None,
create_sent_event=False, root_id=None, parent_id=None,
@@ -336,6 +336,7 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
'expires': expires,
'group': group_id,
'group_index': group_index,
+ 'trailer_request': trailer_request,
'retries': retries,
'timelimit': [time_limit, soft_time_limit],
'root_id': root_id,
@@ -371,6 +372,7 @@ def as_task_v2(self, task_id, name, args=None, kwargs=None,
def as_task_v1(self, task_id, name, args=None, kwargs=None,
countdown=None, eta=None, group_id=None, group_index=None,
+ trailer_request=None,
expires=None, retries=0,
chord=None, callbacks=None, errbacks=None, reply_to=None,
time_limit=None, soft_time_limit=None,
@@ -415,6 +417,7 @@ def as_task_v1(self, task_id, name, args=None, kwargs=None,
'kwargs': kwargs,
'group': group_id,
'group_index': group_index,
+ 'trailer_request': trailer_request,
'retries': retries,
'eta': eta,
'expires': expires,
diff --git a/celery/app/base.py b/celery/app/base.py
index 27e5b610ca7..5de58b15b18 100644
--- a/celery/app/base.py
+++ b/celery/app/base.py
@@ -689,7 +689,7 @@ def send_task(self, name, args=None, kwargs=None, countdown=None,
router=None, result_cls=None, expires=None,
publisher=None, link=None, link_error=None,
add_to_parent=True, group_id=None, group_index=None,
- retries=0, chord=None,
+ trailer_request=None, retries=0, chord=None,
reply_to=None, time_limit=None, soft_time_limit=None,
root_id=None, parent_id=None, route_name=None,
shadow=None, chain=None, task_type=None, **options):
@@ -730,7 +730,7 @@ def send_task(self, name, args=None, kwargs=None, countdown=None,
message = amqp.create_task_message(
task_id, name, args, kwargs, countdown, eta, group_id, group_index,
- expires, retries, chord,
+ trailer_request, expires, retries, chord,
maybe_list(link), maybe_list(link_error),
reply_to or self.thread_oid, time_limit, soft_time_limit,
self.conf.task_send_sent_event,
diff --git a/celery/app/task.py b/celery/app/task.py
index 2265ebb9e67..1156ebfaec6 100644
--- a/celery/app/task.py
+++ b/celery/app/task.py
@@ -80,6 +80,7 @@ class Context:
taskset = None # compat alias to group
group = None
group_index = None
+ trailer_request = []
chord = None
chain = None
utc = None
@@ -114,6 +115,7 @@ def as_execution_options(self):
'parent_id': self.parent_id,
'group_id': self.group,
'group_index': self.group_index,
+ 'trailer_request': self.trailer_request or [],
'chord': self.chord,
'chain': self.chain,
'link': self.callbacks,
@@ -907,6 +909,7 @@ def replace(self, sig):
chord=chord,
group_id=self.request.group,
group_index=self.request.group_index,
+ trailer_request=self.request.trailer_request,
root_id=self.request.root_id,
)
sig.freeze(self.request.id)
@@ -934,6 +937,7 @@ def add_to_chord(self, sig, lazy=False):
sig.set(
group_id=self.request.group,
group_index=self.request.group_index,
+ trailer_request=self.request.trailer_request,
chord=self.request.chord,
root_id=self.request.root_id,
)
diff --git a/celery/backends/asynchronous.py b/celery/backends/asynchronous.py
index 32475d5eaa6..9a530235d8c 100644
--- a/celery/backends/asynchronous.py
+++ b/celery/backends/asynchronous.py
@@ -5,7 +5,7 @@
from collections import deque
from queue import Empty
from time import sleep
-from weakref import WeakKeyDictionary
+from weakref import WeakKeyDictionary, WeakSet
from kombu.utils.compat import detect_environment
@@ -173,7 +173,10 @@ def _maybe_resolve_from_buffer(self, result):
def _add_pending_result(self, task_id, result, weak=False):
concrete, weak_ = self._pending_results
if task_id not in weak_ and result.id not in concrete:
- (weak_ if weak else concrete)[task_id] = result
+ ref = (weak_ if weak else concrete)
+ results = ref.get(task_id, WeakSet() if weak else set())
+ results.add(result)
+ ref[task_id] = results
self.result_consumer.consume_from(task_id)
def add_pending_results(self, results, weak=False):
@@ -292,13 +295,14 @@ def on_state_change(self, meta, message):
if meta['status'] in states.READY_STATES:
task_id = meta['task_id']
try:
- result = self._get_pending_result(task_id)
+ results = self._get_pending_result(task_id)
except KeyError:
# send to buffer in case we received this result
# before it was added to _pending_results.
self._pending_messages.put(task_id, meta)
else:
- result._maybe_set_cache(meta)
+ for result in results:
+ result._maybe_set_cache(meta)
buckets = self.buckets
try:
# remove bucket for this result, since it's fulfilled
diff --git a/celery/backends/base.py b/celery/backends/base.py
index c57897005f8..37c26dc4be2 100644
--- a/celery/backends/base.py
+++ b/celery/backends/base.py
@@ -63,6 +63,12 @@
Result backends that supports chords: Redis, Database, Memcached, and more.
"""
+trailer_request_obj = namedtuple(
+ "trailer_request",
+ ("id", "group", "errbacks", "chord", "trailer_request", "group_index"),
+ defaults=(None, ) * 6
+)
+
def unpickle_backend(cls, args, kwargs):
"""Return an unpickled backend."""
@@ -130,7 +136,7 @@ def __init__(self, app,
self.base_sleep_between_retries_ms = conf.get('result_backend_base_sleep_between_retries_ms', 10)
self.max_retries = conf.get('result_backend_max_retries', float("inf"))
- self._pending_results = pending_results_t({}, WeakValueDictionary())
+ self._pending_results = pending_results_t({}, {})
self._pending_messages = BufferMap(MESSAGE_BUFFER_MAX)
self.url = url
@@ -164,6 +170,14 @@ def mark_as_failure(self, task_id, exc,
self.store_result(task_id, exc, state,
traceback=traceback, request=request)
if request:
+ if request.trailer_request:
+ self.mark_as_failure(
+ request.trailer_request["id"], exc, traceback=traceback,
+ store_result=store_result, call_errbacks=call_errbacks,
+ request=trailer_request_obj(**request.trailer_request),
+ state=state
+ )
+
if request.chord:
self.on_chord_part_return(request, state, exc)
if call_errbacks and request.errbacks:
diff --git a/celery/backends/redis.py b/celery/backends/redis.py
index cce2dc92c2a..7a96bdbd66d 100644
--- a/celery/backends/redis.py
+++ b/celery/backends/redis.py
@@ -158,7 +158,8 @@ def drain_events(self, timeout=None):
def consume_from(self, task_id):
if self._pubsub is None:
return self.start(task_id)
- self._consume_from(task_id)
+ else:
+ self._consume_from(task_id)
def _consume_from(self, task_id):
key = self._get_key_for_task(task_id)
@@ -270,6 +271,7 @@ def __init__(self, host=None, port=None, db=None, password=None,
self.connection_errors, self.channel_errors = (
get_redis_error_classes() if get_redis_error_classes
else ((), ()))
+
self.result_consumer = self.ResultConsumer(
self, self.app, self.accept,
self._pending_results, self._pending_messages,
diff --git a/celery/canvas.py b/celery/canvas.py
index 6536ef5ab28..f8482b71791 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -274,7 +274,7 @@ def clone(self, args=None, kwargs=None, **opts):
partial = clone
def freeze(self, _id=None, group_id=None, chord=None,
- root_id=None, parent_id=None, group_index=None):
+ root_id=None, parent_id=None, group_index=None, trailer_request=None):
"""Finalize the signature by adding a concrete task id.
The task won't be called and you shouldn't call the signature
@@ -303,6 +303,8 @@ def freeze(self, _id=None, group_id=None, chord=None,
opts['chord'] = chord
if group_index is not None:
opts['group_index'] = group_index
+ if trailer_request is not None:
+ opts['trailer_request'] = trailer_request
# pylint: disable=too-many-function-args
# Borks on this, as it's a property.
return self.AsyncResult(tid)
@@ -686,13 +688,13 @@ def run(self, args=None, kwargs=None, group_id=None, chord=None,
return results_from_prepare[0]
def freeze(self, _id=None, group_id=None, chord=None,
- root_id=None, parent_id=None, group_index=None):
+ root_id=None, parent_id=None, group_index=None,trailer_request=None):
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
_, results = self._frozen = self.prepare_steps(
self.args, self.kwargs, self.tasks, root_id, parent_id, None,
self.app, _id, group_id, chord, clone=False,
- group_index=group_index,
+ group_index=group_index, trailer_request=trailer_request
)
return results[0]
@@ -700,7 +702,7 @@ def prepare_steps(self, args, kwargs, tasks,
root_id=None, parent_id=None, link_error=None, app=None,
last_task_id=None, group_id=None, chord_body=None,
clone=True, from_dict=Signature.from_dict,
- group_index=None):
+ group_index=None, trailer_request=None):
app = app or self.app
# use chain message field for protocol 2 and later.
# this avoids pickle blowing the stack on the recursion
@@ -777,7 +779,7 @@ def prepare_steps(self, args, kwargs, tasks,
res = task.freeze(
last_task_id,
root_id=root_id, group_id=group_id, chord=chord_body,
- group_index=group_index,
+ group_index=group_index, trailer_request=trailer_request,
)
else:
res = task.freeze(root_id=root_id)
@@ -1204,7 +1206,7 @@ def _freeze_gid(self, options):
return options, group_id, options.get('root_id')
def freeze(self, _id=None, group_id=None, chord=None,
- root_id=None, parent_id=None, group_index=None):
+ root_id=None, parent_id=None, group_index=None, trailer_request=None):
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
opts = self.options
@@ -1218,6 +1220,8 @@ def freeze(self, _id=None, group_id=None, chord=None,
opts['chord'] = chord
if group_index is not None:
opts['group_index'] = group_index
+ if trailer_request is not None:
+ opts['trailer_request'] = trailer_request
root_id = opts.setdefault('root_id', root_id)
parent_id = opts.setdefault('parent_id', parent_id)
new_tasks = []
@@ -1327,7 +1331,7 @@ def __call__(self, body=None, **options):
return self.apply_async((), {'body': body} if body else {}, **options)
def freeze(self, _id=None, group_id=None, chord=None,
- root_id=None, parent_id=None, group_index=None):
+ root_id=None, parent_id=None, group_index=None, trailer_request=None):
# pylint: disable=redefined-outer-name
# XXX chord is also a class in outer scope.
if not isinstance(self.tasks, group):
@@ -1336,7 +1340,7 @@ def freeze(self, _id=None, group_id=None, chord=None,
parent_id=parent_id, root_id=root_id, chord=self.body)
body_result = self.body.freeze(
_id, root_id=root_id, chord=chord, group_id=group_id,
- group_index=group_index)
+ group_index=group_index, trailer_request=trailer_request)
# we need to link the body result back to the group result,
# but the body may actually be a chain,
# so find the first result without a parent
diff --git a/celery/worker/request.py b/celery/worker/request.py
index 81c3387d98a..c8b6fa1299b 100644
--- a/celery/worker/request.py
+++ b/celery/worker/request.py
@@ -75,7 +75,7 @@ class Request:
if not IS_PYPY: # pragma: no cover
__slots__ = (
- '_app', '_type', 'name', 'id', '_root_id', '_parent_id',
+ '_app', '_type', 'name', 'id', '_root_id', '_parent_id', '_trailer_request',
'_on_ack', '_body', '_hostname', '_eventer', '_connection_errors',
'_task', '_eta', '_expires', '_request_dict', '_on_reject', '_utc',
'_content_type', '_content_encoding', '_argsrepr', '_kwargsrepr',
@@ -626,6 +626,11 @@ def group_index(self):
# used by backend.on_chord_part_return to order return values in group
return self._request_dict.get('group_index')
+ @cached_property
+ def trailer_request(self):
+ # metadata for a trailer task whose result should mirror failures
+ # of this request (see backend.mark_as_failure)
+ return self._request_dict.get('trailer_request') or []
+
def create_request_cls(base, task, pool, hostname, eventer,
ref=ref, revoked_tasks=revoked_tasks,
From 813eea4052cce02ae74b95fb7e0eeb9bd059a0e2 Mon Sep 17 00:00:00 2001
From: Chris Lee
Date: Tue, 13 Oct 2020 11:55:03 -0400
Subject: [PATCH 155/157] ADD: merge options was overriding link_error values
---
celery/canvas.py | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/celery/canvas.py b/celery/canvas.py
index f8482b71791..25fd111ab4e 100644
--- a/celery/canvas.py
+++ b/celery/canvas.py
@@ -238,8 +238,15 @@ def _merge(self, args=None, kwargs=None, options=None, force=False):
})
else:
new_options = self.options
+
+ new_options = dict(new_options) if new_options else {}
+ # Merge link_error from both option sources instead of letting
+ # one override the other.
+ new_options["link_error"] = (
+ (options or {}).get("link_error", []) +
+ (self.options or {}).get("link_error", [])
+ )
+
if self.immutable and not force:
return (self.args, self.kwargs, new_options)
+
return (tuple(args) + tuple(self.args) if args else self.args,
dict(self.kwargs, **kwargs) if kwargs else self.kwargs,
new_options)
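With the merge corrected, error links contributed by the stored signature and by per-call options both survive; roughly (`log_error` and `notify` are hypothetical errback tasks):

    sig = add.s(2, 2).on_error(log_error.s())        # stored in sig.options
    result = sig.apply_async(link_error=notify.s())  # supplied per call
    # Both errbacks are now kept instead of one list replacing the other.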
From 92173932ab2e76e97359013c9bdba87d0acb7e7d Mon Sep 17 00:00:00 2001
From: Chris Lee
Date: Thu, 19 Nov 2020 13:49:14 -0500
Subject: [PATCH 156/157] PATCH: DLX and reject behaviour
---
celery/worker/request.py | 28 ++++++++++++++++++++--------
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/celery/worker/request.py b/celery/worker/request.py
index c8b6fa1299b..40c9cc4a286 100644
--- a/celery/worker/request.py
+++ b/celery/worker/request.py
@@ -3,6 +3,7 @@
This module defines the :class:`Request` class, that specifies
how tasks are executed.
"""
import logging
+import os
import sys
from datetime import datetime
@@ -35,6 +36,8 @@
IS_PYPY = hasattr(sys, 'pypy_version_info')
+REJECT_TO_HIGH_MEMORY = os.getenv("REJECT_TO_HIGH_MEMORY")
+
logger = get_logger(__name__)
debug, info, warn, error = (logger.debug, logger.info,
logger.warning, logger.error)
@@ -485,7 +488,8 @@ def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
"""Handler called if the task raised an exception."""
task_ready(self)
if isinstance(exc_info.exception, MemoryError):
- raise MemoryError(f'Process got: {exc_info.exception}')
+ if not REJECT_TO_HIGH_MEMORY or not self.task.acks_late:
+ raise MemoryError(f'Process got: {exc_info.exception}')
elif isinstance(exc_info.exception, Reject):
return self.reject(requeue=exc_info.exception.requeue)
elif isinstance(exc_info.exception, Ignore):
@@ -497,17 +501,25 @@ def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
return self.on_retry(exc_info)
# (acks_late) acknowledge after result stored.
- requeue = False
if self.task.acks_late:
reject = (
self.task.reject_on_worker_lost and
- isinstance(exc, WorkerLostError)
+ isinstance(exc, (WorkerLostError, MemoryError, Terminated))
)
ack = self.task.acks_on_failure_or_timeout
if reject:
- requeue = True
- self.reject(requeue=requeue)
- send_failed_event = False
+ if REJECT_TO_HIGH_MEMORY:
+ # If we have a higher-memory queue, reject without requeue so
+ # the broker dead-letters the message there, and return early
+ # so no failure event is sent.
+ self.reject(requeue=False)
+ send_failed_event = False
+ return
+ else:
+ send_failed_event = True
+ return_ok = False
+ # Acknowledge the message so it doesn't get retried
+ # and can be marked as complete
+ self.acknowledge()
elif ack:
self.acknowledge()
else:
@@ -521,8 +533,8 @@ def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
self._announce_revoked(
'terminated', True, str(exc), False)
send_failed_event = False # already sent revoked event
- elif not requeue and (isinstance(exc, WorkerLostError) or not return_ok):
- # only mark as failure if task has not been requeued
+ elif not return_ok:
+ # We never want to retry failed tasks unless the worker was lost or terminated
self.task.backend.mark_as_failure(
self.id, exc, request=self._context,
store_result=self.store_errors,
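The REJECT_TO_HIGH_MEMORY switch is only half of the mechanism: it assumes the broker dead-letters rejected messages to a higher-memory queue. A hedged kombu-style sketch of the matching queue declarations (names are illustrative, not from this repo):

    from kombu import Exchange, Queue

    task_queues = [
        Queue(
            'default',
            Exchange('default'),
            routing_key='default',
            queue_arguments={
                'x-dead-letter-exchange': 'high-memory',
                'x-dead-letter-routing-key': 'high-memory',
            },
        ),
        Queue('high-memory', Exchange('high-memory'), routing_key='high-memory'),
    ]
    # Workers consuming 'high-memory' run without REJECT_TO_HIGH_MEMORY set,
    # so a second MemoryError is raised instead of rejecting again.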
From 61b1a27c16242eb874e1b23d0911ebbef7bb8757 Mon Sep 17 00:00:00 2001
From: Chris Lee
Date: Tue, 22 Dec 2020 16:31:16 -0500
Subject: [PATCH 157/157] FIX: amqp dependencies
---
requirements/default.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements/default.txt b/requirements/default.txt
index 33c3b6be9f8..10d347e896c 100644
--- a/requirements/default.txt
+++ b/requirements/default.txt
@@ -5,4 +5,4 @@ vine>=5.0.0,<6.0
click>=7.0,<8.0
click-didyoumean>=0.0.3
click-repl>=0.1.6
-click-plugins>=1.1.1
+click-plugins>=1.1.1
\ No newline at end of file