From 899db95e5583837046857edff2fd61bce5848366 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Alix?= Date: Fri, 29 Jul 2022 10:54:50 +0200 Subject: [PATCH 01/14] base_import_async: add a dedicated channel --- base_import_async/data/queue_job_function_data.xml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/base_import_async/data/queue_job_function_data.xml b/base_import_async/data/queue_job_function_data.xml index 22cc8dbab0..fb04a63613 100644 --- a/base_import_async/data/queue_job_function_data.xml +++ b/base_import_async/data/queue_job_function_data.xml @@ -1,7 +1,13 @@ + + base_import + + + _split_file + _import_one_chunk + Date: Tue, 18 Oct 2022 13:04:31 +0200 Subject: [PATCH 02/14] Update test with proper way to pass company to job --- test_queue_job/models/test_models.py | 2 +- test_queue_job/tests/test_job.py | 41 ++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/test_queue_job/models/test_models.py b/test_queue_job/models/test_models.py index f810dba862..573e2380a9 100644 --- a/test_queue_job/models/test_models.py +++ b/test_queue_job/models/test_models.py @@ -40,7 +40,7 @@ class ModelTestQueueJob(models.Model): # to test the context is serialized/deserialized properly @api.model def _job_prepare_context_before_enqueue_keys(self): - return ("tz", "lang") + return ("tz", "lang", "allowed_company_ids") def testing_method(self, *args, **kwargs): """Method used for tests diff --git a/test_queue_job/tests/test_job.py b/test_queue_job/tests/test_job.py index 094a56165b..f57818621c 100644 --- a/test_queue_job/tests/test_job.py +++ b/test_queue_job/tests/test_job.py @@ -185,6 +185,47 @@ def test_postpone(self): self.assertEqual(job_a.result, "test") self.assertFalse(job_a.exc_info) + def test_company_simple(self): + company = self.env.ref("base.main_company") + eta = datetime.now() + timedelta(hours=5) + test_job = Job( + self.env["test.queue.job"].with_company(company).testing_method, + args=("o", "k"), + kwargs={"return_context": 1}, + priority=15, + eta=eta, + description="My description", + ) + test_job.worker_pid = 99999 # normally set on "set_start" + test_job.store() + job_read = Job.load(self.env, test_job.uuid) + self.assertEqual(test_job.func.__func__, job_read.func.__func__) + result_ctx = job_read.func(*tuple(test_job.args), **test_job.kwargs) + self.assertEqual(result_ctx.get("allowed_company_ids"), company.ids) + + def test_company_complex(self): + company1 = self.env.ref("base.main_company") + company2 = company1.create({"name": "Queue job company"}) + companies = company1 | company2 + self.env.user.write({"company_ids": [(6, False, companies.ids)]}) + # Ensure the main company still the first + self.assertEqual(self.env.user.company_id, company1) + eta = datetime.now() + timedelta(hours=5) + test_job = Job( + self.env["test.queue.job"].with_company(company2).testing_method, + args=("o", "k"), + kwargs={"return_context": 1}, + priority=15, + eta=eta, + description="My description", + ) + test_job.worker_pid = 99999 # normally set on "set_start" + test_job.store() + job_read = Job.load(self.env, test_job.uuid) + self.assertEqual(test_job.func.__func__, job_read.func.__func__) + result_ctx = job_read.func(*tuple(test_job.args), **test_job.kwargs) + self.assertEqual(result_ctx.get("allowed_company_ids"), company2.ids) + def test_store(self): test_job = Job(self.method) test_job.store() From 8dcd8b18b7bf433e1afa5efd31561faa53e136b8 Mon Sep 17 00:00:00 2001 From: "Laurent Mignon (ACSONE)" Date: Thu, 21 Dec 2023 13:41:47 
+0100
Subject: [PATCH 03/14] [IMP] queue_job_cron: Avoid parallel run

By default, odoo never runs the same cron job in parallel. This commit uses
the identity key mechanism to enforce this behaviour when a cron job is run
as a queue job.

This behaviour can be controlled by a new setting on the cron definition,
but it is activated by default to keep the original behaviour.
---
 queue_job_cron/models/ir_cron.py              | 37 ++++++++++++++-----
 .../readme/newsfragments/.gitignore           |  0
 .../readme/newsfragments/612.feature          |  9 +++++
 queue_job_cron/tests/test_queue_job_cron.py   | 19 ++++++++++
 queue_job_cron/views/ir_cron_view.xml         |  4 ++
 5 files changed, 59 insertions(+), 10 deletions(-)
 create mode 100644 queue_job_cron/readme/newsfragments/.gitignore
 create mode 100644 queue_job_cron/readme/newsfragments/612.feature

diff --git a/queue_job_cron/models/ir_cron.py b/queue_job_cron/models/ir_cron.py
index 440740f164..7e4f5b848d 100644
--- a/queue_job_cron/models/ir_cron.py
+++ b/queue_job_cron/models/ir_cron.py
@@ -4,12 +4,23 @@

 from odoo import api, fields, models

+from odoo.addons.queue_job.job import identity_exact
+
 _logger = logging.getLogger(__name__)


 class IrCron(models.Model):
     _inherit = "ir.cron"

+    no_parallel_queue_job_run = fields.Boolean(
+        help="Avoid parallel run. "
+        "If the cron job is already running, the new one will be skipped. "
+        "By default, odoo never runs the same cron job in parallel. This "
+        "option is therefore set to True by default when the job is run as a "
+        "queue job.",
+        default=True,
+    )
+
     run_as_queue_job = fields.Boolean(
         help="Specify if this cron should be ran as a queue job"
     )
@@ -39,23 +50,29 @@ def method_direct_trigger(self):
                 _cron = cron.with_user(cron.user_id).with_context(
                     lastcall=cron.lastcall
                 )
-                _cron.with_delay(
-                    priority=_cron.priority,
-                    description=_cron.name,
-                    channel=_cron.channel_id.complete_name,
-                )._run_job_as_queue_job(server_action=_cron.ir_actions_server_id)
+                _cron._delay_run_job_as_queue_job(
+                    server_action=_cron.ir_actions_server_id
+                )
         return True

     def _callback(self, cron_name, server_action_id, job_id):
         cron = self.env["ir.cron"].sudo().browse(job_id)
         if cron.run_as_queue_job:
             server_action = self.env["ir.actions.server"].browse(server_action_id)
-            return self.with_delay(
-                priority=cron.priority,
-                description=cron.name,
-                channel=cron.channel_id.complete_name,
-            )._run_job_as_queue_job(server_action=server_action)
+            return cron._delay_run_job_as_queue_job(server_action=server_action)
         else:
             return super()._callback(
                 cron_name=cron_name, server_action_id=server_action_id, job_id=job_id
             )
+
+    def _delay_run_job_as_queue_job(self, server_action):
+        self.ensure_one()
+        identity_key = None
+        if self.no_parallel_queue_job_run:
+            identity_key = identity_exact
+        return self.with_delay(
+            priority=self.priority,
+            description=self.name,
+            channel=self.channel_id.complete_name,
+            identity_key=identity_key,
+        )._run_job_as_queue_job(server_action=server_action)
diff --git a/queue_job_cron/readme/newsfragments/.gitignore b/queue_job_cron/readme/newsfragments/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/queue_job_cron/readme/newsfragments/612.feature b/queue_job_cron/readme/newsfragments/612.feature
new file mode 100644
index 0000000000..9c521620a2
--- /dev/null
+++ b/queue_job_cron/readme/newsfragments/612.feature
@@ -0,0 +1,9 @@
+By default prevent parallel run of the same cron job when run as queue job.
+
+When a cron job is run by odoo, the odoo runner will prevent parallel run
+of the same cron job. Before this change, this was not the case when the
+cron job was run as a queue job. A new option is added to the cron job when
+run as a queue job to prevent parallel run. This option is set to True by
+default. In this way, the behavior is now the same as when the cron job is run
+by odoo but you keep the possibility to disable this restriction when run as
+a queue job.
diff --git a/queue_job_cron/tests/test_queue_job_cron.py b/queue_job_cron/tests/test_queue_job_cron.py
index 3eec55f7e9..d3cc18d636 100644
--- a/queue_job_cron/tests/test_queue_job_cron.py
+++ b/queue_job_cron/tests/test_queue_job_cron.py
@@ -39,3 +39,22 @@ def test_queue_job_cron_run(self):
         cron = self.env.ref("queue_job.ir_cron_autovacuum_queue_jobs")
         IrCron = self.env["ir.cron"]
         IrCron._run_job_as_queue_job(server_action=cron.ir_actions_server_id)
+
+    def test_queue_job_no_parallelism(self):
+        cron = self.env.ref("queue_job.ir_cron_autovacuum_queue_jobs")
+        default_channel = self.env.ref("queue_job_cron.channel_root_ir_cron")
+        cron.write(
+            {
+                "no_parallel_queue_job_run": True,
+                "run_as_queue_job": True,
+                "channel_id": default_channel.id,
+            }
+        )
+        cron.method_direct_trigger()
+        cron.method_direct_trigger()
+        nb_jobs = self.env["queue.job"].search_count([("name", "=", cron.name)])
+        self.assertEqual(nb_jobs, 1)
+        cron.no_parallel_queue_job_run = False
+        cron.method_direct_trigger()
+        nb_jobs = self.env["queue.job"].search_count([("name", "=", cron.name)])
+        self.assertEqual(nb_jobs, 2)
diff --git a/queue_job_cron/views/ir_cron_view.xml b/queue_job_cron/views/ir_cron_view.xml
index 1c2c271fb1..2b3567bfcf 100644
--- a/queue_job_cron/views/ir_cron_view.xml
+++ b/queue_job_cron/views/ir_cron_view.xml
@@ -7,6 +7,10 @@
+
Date: Wed, 10 Apr 2024 18:29:50 +0200
Subject: [PATCH 05/14] [FIX] queue_job_cron_jobrunner: use priority to select job

* use FIFO, the first created job will be treated first
* if priorities are different, the higher-priority job is taken first

Yet we are not taking channel priority into account

---
 queue_job_cron_jobrunner/models/queue_job.py |  2 +-
 .../tests/test_queue_job.py                  | 30 ++++++++++++++++++-
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/queue_job_cron_jobrunner/models/queue_job.py b/queue_job_cron_jobrunner/models/queue_job.py
index 4efd8b21d5..a7acdc5640 100644
--- a/queue_job_cron_jobrunner/models/queue_job.py
+++ b/queue_job_cron_jobrunner/models/queue_job.py
@@ -40,7 +40,7 @@ def _acquire_one_job(self):
             FROM queue_job
             WHERE state = 'pending'
                 AND (eta IS NULL OR eta <= (now() AT TIME ZONE 'UTC'))
-            ORDER BY date_created DESC
+            ORDER BY priority, date_created
             LIMIT 1 FOR NO KEY UPDATE SKIP LOCKED
             """
         )
diff --git a/queue_job_cron_jobrunner/tests/test_queue_job.py b/queue_job_cron_jobrunner/tests/test_queue_job.py
index 3f2e0ef637..54800b792c 100644
--- a/queue_job_cron_jobrunner/tests/test_queue_job.py
+++ b/queue_job_cron_jobrunner/tests/test_queue_job.py
@@ -67,5 +67,33 @@ def test_queue_job_cron_trigger_enqueue_dependencies(self):
         self.assertEqual(job_record.state, "done", "Processed OK")
         # if the state is "waiting_dependencies", it means the "enqueue_waiting()"
-        # step has not been doen when the parent job has been done
+        # step has not been done when the parent job has been done
         self.assertEqual(job_record_depends.state, "done", "Processed OK")
+
+    def test_acquire_one_job_use_priority(self):
+        with freeze_time("2024-01-01 10:01:01"):
+            self.env["res.partner"].with_delay(priority=3).create({"name": "test"})
+
+        with freeze_time("2024-01-01 10:02:01"):
+            job = (
self.env["res.partner"].with_delay(priority=1).create({"name": "test"}) + ) + + with freeze_time("2024-01-01 10:03:01"): + self.env["res.partner"].with_delay(priority=2).create({"name": "test"}) + + self.assertEqual(self.env["queue.job"]._acquire_one_job(), job.db_record()) + + def test_acquire_one_job_consume_the_oldest_first(self): + with freeze_time("2024-01-01 10:01:01"): + job = ( + self.env["res.partner"].with_delay(priority=30).create({"name": "test"}) + ) + + with freeze_time("2024-01-01 10:02:01"): + self.env["res.partner"].with_delay(priority=30).create({"name": "test"}) + + with freeze_time("2024-01-01 10:03:01"): + self.env["res.partner"].with_delay(priority=30).create({"name": "test"}) + + self.assertEqual(self.env["queue.job"]._acquire_one_job(), job.db_record()) From 2015e7a312b8a62d6c07d0ca09cc8cb2fc1132d7 Mon Sep 17 00:00:00 2001 From: Florian Mounier Date: Tue, 4 Jun 2024 09:29:10 +0200 Subject: [PATCH 06/14] [IMP] queue_job: Display warning before displaying big dependency graphs --- queue_job/static/src/js/queue_job_fields.js | 20 +++++++++++++++++++ .../static/src/scss/queue_job_fields.scss | 7 +++++++ 2 files changed, 27 insertions(+) diff --git a/queue_job/static/src/js/queue_job_fields.js b/queue_job/static/src/js/queue_job_fields.js index 3829ad7145..7da8a6cfd6 100644 --- a/queue_job/static/src/js/queue_job_fields.js +++ b/queue_job/static/src/js/queue_job_fields.js @@ -17,6 +17,7 @@ odoo.define("queue_job.fields", function (require) { init: function () { this._super.apply(this, arguments); this.network = null; + this.forceRender = false; this.tabListenerInstalled = false; }, start: function () { @@ -89,6 +90,25 @@ odoo.define("queue_job.fields", function (require) { }); }); + if (nodes.length * edges.length > 5000 && !this.forceRender) { + const warningDiv = document.createElement("div"); + warningDiv.className = "alert alert-warning"; + warningDiv.innerText = + `This graph is big (${nodes.length} nodes, ` + + `${edges.length} edges), it may take a while to display.`; + const button = document.createElement("button"); + button.innerText = "Display anyway"; + button.className = "btn btn-secondary"; + button.onclick = function () { + self.forceRender = true; + warningDiv.parentNode.removeChild(warningDiv); + self._render(); + }; + warningDiv.appendChild(button); + this.$el.append(warningDiv); + return; + } + var data = { nodes: new vis.DataSet(nodes), edges: new vis.DataSet(edges), diff --git a/queue_job/static/src/scss/queue_job_fields.scss b/queue_job/static/src/scss/queue_job_fields.scss index 150469a384..64cb6465cf 100644 --- a/queue_job/static/src/scss/queue_job_fields.scss +++ b/queue_job/static/src/scss/queue_job_fields.scss @@ -2,4 +2,11 @@ width: 600px; height: 400px; border: 1px solid lightgray; + .alert { + height: 100%; + display: flex; + flex-direction: column; + align-items: center; + justify-content: center; + } } From 33bfb878c89e7cd6a3ac401d5846a7d481d24aa0 Mon Sep 17 00:00:00 2001 From: Florian Mounier Date: Tue, 19 Nov 2024 09:21:11 +0100 Subject: [PATCH 07/14] [IMP] queue_job: Add split method --- queue_job/README.rst | 32 +++++++++ queue_job/delay.py | 46 ++++++++++++ queue_job/readme/USAGE.rst | 32 +++++++++ queue_job/static/description/index.html | 28 ++++++++ queue_job/tests/__init__.py | 1 + queue_job/tests/test_delayable.py | 6 +- queue_job/tests/test_delayable_split.py | 94 +++++++++++++++++++++++++ 7 files changed, 237 insertions(+), 2 deletions(-) create mode 100644 queue_job/tests/test_delayable_split.py diff --git 
a/queue_job/README.rst b/queue_job/README.rst index a22f8ad8bb..6ed473e48f 100644 --- a/queue_job/README.rst +++ b/queue_job/README.rst @@ -254,6 +254,38 @@ Note: ``delay()`` must be called on the delayable, chain, or group which is at t of the graph. In the example above, if it was called on ``group_a``, then ``group_b`` would never be delayed (but a warning would be shown). +It is also possible to split a job into several jobs, each one processing a part of the +work. This can be useful to avoid very long jobs, parallelize some task and get more specific +errors. Usage is as follows: + +.. code-block:: python + + def button_split_delayable(self): + ( + self # Can be a big recordset, let's say 1000 records + .delayable() + .generate_thumbnail((50, 50)) + .set(priority=30) + .set(description=_("generate xxx")) + .split(50) # Split the job in 20 jobs of 50 records each + .delay() + ) + +The ``split()`` method takes a ``chain`` boolean keyword argument. If set to +True, the jobs will be chained, meaning that the next job will only start when the previous +one is done: + +.. code-block:: python + + def button_increment_var(self): + ( + self + .delayable() + .increment_counter() + .split(1, chain=True) # Will exceute the jobs one after the other + .delay() + ) + Enqueing Job Options -------------------- diff --git a/queue_job/delay.py b/queue_job/delay.py index 77c823c63c..323749def2 100644 --- a/queue_job/delay.py +++ b/queue_job/delay.py @@ -534,6 +534,52 @@ def delay(self): """Delay the whole graph""" self._graph.delay() + def split(self, size, chain=False): + """Split the Delayables. + + Use `DelayableGroup` or `DelayableChain` + if `chain` is True containing batches of size `size` + """ + if not self._job_method: + raise ValueError("No method set on the Delayable") + + total_records = len(self.recordset) + + delayables = [] + for index in range(0, total_records, size): + recordset = self.recordset[index : index + size] + delayable = Delayable( + recordset, + priority=self.priority, + eta=self.eta, + max_retries=self.max_retries, + description=self.description, + channel=self.channel, + identity_key=self.identity_key, + ) + # Update the __self__ + delayable._job_method = getattr(recordset, self._job_method.__name__) + delayable._job_args = self._job_args + delayable._job_kwargs = self._job_kwargs + + delayables.append(delayable) + + description = self.description or ( + self._job_method.__doc__.splitlines()[0].strip() + if self._job_method.__doc__ + else "{}.{}".format(self.recordset._name, self._job_method.__name__) + ) + for index, delayable in enumerate(delayables): + delayable.set( + description="%s (split %s/%s)" + % (description, index + 1, len(delayables)) + ) + + # Prevent warning on deletion + self._generated_job = True + + return (DelayableChain if chain else DelayableGroup)(*delayables) + def _build_job(self): if self._generated_job: return self._generated_job diff --git a/queue_job/readme/USAGE.rst b/queue_job/readme/USAGE.rst index 8f7da0473c..66dd1744fd 100644 --- a/queue_job/readme/USAGE.rst +++ b/queue_job/readme/USAGE.rst @@ -104,6 +104,38 @@ Note: ``delay()`` must be called on the delayable, chain, or group which is at t of the graph. In the example above, if it was called on ``group_a``, then ``group_b`` would never be delayed (but a warning would be shown). +It is also possible to split a job into several jobs, each one processing a part of the +work. This can be useful to avoid very long jobs, parallelize some task and get more specific +errors. 
+
+.. code-block:: python
+
+    def button_split_delayable(self):
+        (
+            self  # Can be a big recordset, let's say 1000 records
+            .delayable()
+            .generate_thumbnail((50, 50))
+            .set(priority=30)
+            .set(description=_("generate xxx"))
+            .split(50)  # Split the job in 20 jobs of 50 records each
+            .delay()
+        )
+
+The ``split()`` method takes a ``chain`` boolean keyword argument. If set to
+True, the jobs will be chained, meaning that the next job will only start when the previous
+one is done:
+
+.. code-block:: python
+
+    def button_increment_var(self):
+        (
+            self
+            .delayable()
+            .increment_counter()
+            .split(1, chain=True) # Will execute the jobs one after the other
+            .delay()
+        )
+
 Enqueing Job Options
 --------------------

diff --git a/queue_job/static/description/index.html b/queue_job/static/description/index.html
index bcd8f0580b..7864577481 100644
--- a/queue_job/static/description/index.html
+++ b/queue_job/static/description/index.html
@@ -599,6 +599,34 @@

Delaying jobs

Note: delay() must be called on the delayable, chain, or group which is at the top of the graph. In the example above, if it was called on group_a, then group_b would never be delayed (but a warning would be shown).

+

It is also possible to split a job into several jobs, each one processing a part of the work. This can be useful to avoid very long jobs, parallelize some tasks and get more specific errors. Usage is as follows:

+
+def button_split_delayable(self):
+    (
+        self  # Can be a big recordset, let's say 1000 records
+        .delayable()
+        .generate_thumbnail((50, 50))
+        .set(priority=30)
+        .set(description=_("generate xxx"))
+        .split(50)  # Split the job in 20 jobs of 50 records each
+        .delay()
+    )
+
+

The split() method takes a chain boolean keyword argument. If set to True, the jobs will be chained, meaning that the next job will only start when the previous one is done:

+
+def button_increment_var(self):
+    (
+        self
+        .delayable()
+        .increment_counter()
+        .split(1, chain=True) # Will execute the jobs one after the other
+        .delay()
+    )
+
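As a concrete reference for the two examples above, here is a minimal standalone sketch of the batching rule ``split(size)`` applies — consecutive slices of at most ``size`` records, mirroring the ``range(0, total_records, size)`` loop in ``Delayable.split()`` (the ``batches`` helper is illustrative only, not part of the module's API):

.. code-block:: python

    # Sketch of the slicing performed by Delayable.split(size): consecutive
    # chunks of at most `size` records; only the last chunk may be shorter.
    def batches(records, size):
        """Yield `records` in consecutive chunks of at most `size` items."""
        for start in range(0, len(records), size):
            yield records[start : start + size]

    # 1000 records split by 50 -> 20 jobs, as in the first example above;
    # each resulting job is later described as "... (split i/20)".
    chunks = list(batches(list(range(1000)), 50))
    assert len(chunks) == 20 and all(len(c) == 50 for c in chunks)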

Enqueing Job Options

diff --git a/queue_job/tests/__init__.py b/queue_job/tests/__init__.py index e0ff9576a5..db53ac3a60 100644 --- a/queue_job/tests/__init__.py +++ b/queue_job/tests/__init__.py @@ -1,6 +1,7 @@ from . import test_runner_channels from . import test_runner_runner from . import test_delayable +from . import test_delayable_split from . import test_json_field from . import test_model_job_channel from . import test_model_job_function diff --git a/queue_job/tests/test_delayable.py b/queue_job/tests/test_delayable.py index 097c29f25e..6284ce80ab 100644 --- a/queue_job/tests/test_delayable.py +++ b/queue_job/tests/test_delayable.py @@ -1,13 +1,15 @@ # copyright 2019 Camptocamp # license agpl-3.0 or later (http://www.gnu.org/licenses/agpl.html) -import unittest from unittest import mock +from odoo.tests import common + +# pylint: disable=odoo-addons-relative-import from odoo.addons.queue_job.delay import Delayable, DelayableGraph -class TestDelayable(unittest.TestCase): +class TestDelayable(common.BaseCase): def setUp(self): super().setUp() self.recordset = mock.MagicMock(name="recordset") diff --git a/queue_job/tests/test_delayable_split.py b/queue_job/tests/test_delayable_split.py new file mode 100644 index 0000000000..b761878b2e --- /dev/null +++ b/queue_job/tests/test_delayable_split.py @@ -0,0 +1,94 @@ +# Copyright 2024 Akretion (http://www.akretion.com). +# @author Florian Mounier +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). + +from odoo.tests import common + +# pylint: disable=odoo-addons-relative-import +from odoo.addons.queue_job.delay import Delayable + + +class TestDelayableSplit(common.BaseCase): + def setUp(self): + super().setUp() + + class FakeRecordSet(list): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._name = "recordset" + + def __getitem__(self, key): + if isinstance(key, slice): + return FakeRecordSet(super().__getitem__(key)) + return super().__getitem__(key) + + def method(self, arg, kwarg=None): + """Method to be called""" + return arg, kwarg + + self.FakeRecordSet = FakeRecordSet + + def test_delayable_split_no_method_call_beforehand(self): + dl = Delayable(self.FakeRecordSet(range(20))) + with self.assertRaises(ValueError): + dl.split(3) + + def test_delayable_split_10_3(self): + dl = Delayable(self.FakeRecordSet(range(10))) + dl.method("arg", kwarg="kwarg") + group = dl.split(3) + self.assertEqual(len(group._delayables), 4) + delayables = sorted(list(group._delayables), key=lambda x: x.description) + self.assertEqual(delayables[0].recordset, self.FakeRecordSet([0, 1, 2])) + self.assertEqual(delayables[1].recordset, self.FakeRecordSet([3, 4, 5])) + self.assertEqual(delayables[2].recordset, self.FakeRecordSet([6, 7, 8])) + self.assertEqual(delayables[3].recordset, self.FakeRecordSet([9])) + self.assertEqual(delayables[0].description, "Method to be called (split 1/4)") + self.assertEqual(delayables[1].description, "Method to be called (split 2/4)") + self.assertEqual(delayables[2].description, "Method to be called (split 3/4)") + self.assertEqual(delayables[3].description, "Method to be called (split 4/4)") + self.assertNotEqual(delayables[0]._job_method, dl._job_method) + self.assertNotEqual(delayables[1]._job_method, dl._job_method) + self.assertNotEqual(delayables[2]._job_method, dl._job_method) + self.assertNotEqual(delayables[3]._job_method, dl._job_method) + self.assertEqual(delayables[0]._job_method.__name__, dl._job_method.__name__) + self.assertEqual(delayables[1]._job_method.__name__, dl._job_method.__name__) + 
self.assertEqual(delayables[2]._job_method.__name__, dl._job_method.__name__) + self.assertEqual(delayables[3]._job_method.__name__, dl._job_method.__name__) + self.assertEqual(delayables[0]._job_args, ("arg",)) + self.assertEqual(delayables[1]._job_args, ("arg",)) + self.assertEqual(delayables[2]._job_args, ("arg",)) + self.assertEqual(delayables[3]._job_args, ("arg",)) + self.assertEqual(delayables[0]._job_kwargs, {"kwarg": "kwarg"}) + self.assertEqual(delayables[1]._job_kwargs, {"kwarg": "kwarg"}) + self.assertEqual(delayables[2]._job_kwargs, {"kwarg": "kwarg"}) + self.assertEqual(delayables[3]._job_kwargs, {"kwarg": "kwarg"}) + + def test_delayable_split_10_5(self): + dl = Delayable(self.FakeRecordSet(range(10))) + dl.method("arg", kwarg="kwarg") + group = dl.split(5) + self.assertEqual(len(group._delayables), 2) + delayables = sorted(list(group._delayables), key=lambda x: x.description) + self.assertEqual(delayables[0].recordset, self.FakeRecordSet([0, 1, 2, 3, 4])) + self.assertEqual(delayables[1].recordset, self.FakeRecordSet([5, 6, 7, 8, 9])) + self.assertEqual(delayables[0].description, "Method to be called (split 1/2)") + self.assertEqual(delayables[1].description, "Method to be called (split 2/2)") + + def test_delayable_split_10_10(self): + dl = Delayable(self.FakeRecordSet(range(10))) + dl.method("arg", kwarg="kwarg") + group = dl.split(10) + self.assertEqual(len(group._delayables), 1) + delayables = sorted(list(group._delayables), key=lambda x: x.description) + self.assertEqual(delayables[0].recordset, self.FakeRecordSet(range(10))) + self.assertEqual(delayables[0].description, "Method to be called (split 1/1)") + + def test_delayable_split_10_20(self): + dl = Delayable(self.FakeRecordSet(range(10))) + dl.method("arg", kwarg="kwarg") + group = dl.split(20) + self.assertEqual(len(group._delayables), 1) + delayables = sorted(list(group._delayables), key=lambda x: x.description) + self.assertEqual(delayables[0].recordset, self.FakeRecordSet(range(10))) + self.assertEqual(delayables[0].description, "Method to be called (split 1/1)") From c6668070e8889c05e00fbac85db4817c7eae8a21 Mon Sep 17 00:00:00 2001 From: Quoc Duong Date: Thu, 1 Aug 2024 11:45:38 +0700 Subject: [PATCH 08/14] [IMP] queue_job: Cancel child jobs when the parent is cancelled --- queue_job/job.py | 12 +++++++++-- queue_job/models/queue_job.py | 2 ++ test_queue_job/tests/test_job.py | 37 ++++++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+), 2 deletions(-) diff --git a/queue_job/job.py b/queue_job/job.py index d38a899095..6cdb69b738 100644 --- a/queue_job/job.py +++ b/queue_job/job.py @@ -539,8 +539,8 @@ def perform(self): return self.result - def enqueue_waiting(self): - sql = """ + def _get_common_dependent_jobs_query(self): + return """ UPDATE queue_job SET state = %s FROM ( @@ -568,9 +568,17 @@ def enqueue_waiting(self): AND %s = ALL(jobs.parent_states) AND state = %s; """ + + def enqueue_waiting(self): + sql = self._get_common_dependent_jobs_query() self.env.cr.execute(sql, (PENDING, self.uuid, DONE, WAIT_DEPENDENCIES)) self.env["queue.job"].invalidate_cache(["state"]) + def cancel_dependent_jobs(self): + sql = self._get_common_dependent_jobs_query() + self.env.cr.execute(sql, (CANCELLED, self.uuid, CANCELLED, WAIT_DEPENDENCIES)) + self.env["queue.job"].invalidate_cache(["state"]) + def store(self): """Store the Job""" job_model = self.env["queue.job"] diff --git a/queue_job/models/queue_job.py b/queue_job/models/queue_job.py index c3a37bed34..a740d4d226 100644 --- a/queue_job/models/queue_job.py 
+++ b/queue_job/models/queue_job.py @@ -328,6 +328,8 @@ def _change_job_state(self, state, result=None): elif state == CANCELLED: job_.set_cancelled(result=result) job_.store() + record.env["queue.job"].flush() + job_.cancel_dependent_jobs() else: raise ValueError("State not supported: %s" % state) diff --git a/test_queue_job/tests/test_job.py b/test_queue_job/tests/test_job.py index f57818621c..502fafe26d 100644 --- a/test_queue_job/tests/test_job.py +++ b/test_queue_job/tests/test_job.py @@ -15,6 +15,7 @@ RetryableJobError, ) from odoo.addons.queue_job.job import ( + CANCELLED, DONE, ENQUEUED, FAILED, @@ -530,6 +531,42 @@ def test_button_done(self): stored.result, "Manually set to done by %s" % self.env.user.name ) + def test_button_done_enqueue_waiting_dependencies(self): + job_root = Job(self.env["test.queue.job"].testing_method) + job_child = Job(self.env["test.queue.job"].testing_method) + job_child.add_depends({job_root}) + + DelayableGraph._ensure_same_graph_uuid([job_root, job_child]) + job_root.store() + job_child.store() + + self.assertEqual(job_child.state, WAIT_DEPENDENCIES) + record_root = job_root.db_record() + record_child = job_child.db_record() + # Trigger button done + record_root.button_done() + # Check the state + self.assertEqual(record_root.state, DONE) + self.assertEqual(record_child.state, PENDING) + + def test_button_cancel_dependencies(self): + job_root = Job(self.env["test.queue.job"].testing_method) + job_child = Job(self.env["test.queue.job"].testing_method) + job_child.add_depends({job_root}) + + DelayableGraph._ensure_same_graph_uuid([job_root, job_child]) + job_root.store() + job_child.store() + + self.assertEqual(job_child.state, WAIT_DEPENDENCIES) + record_root = job_root.db_record() + record_child = job_child.db_record() + # Trigger button cancelled + record_root.button_cancelled() + # Check the state + self.assertEqual(record_root.state, CANCELLED) + self.assertEqual(record_child.state, CANCELLED) + def test_requeue(self): stored = self._create_job() stored.write({"state": "failed"}) From d44965226b6397870539cb839ff30739f323b8de Mon Sep 17 00:00:00 2001 From: Simone Orsi Date: Tue, 28 Feb 2023 15:21:25 +0100 Subject: [PATCH 09/14] queue_job: unify no delay option Current situation: * multiple keys for no good reason * half baked: not all of them used everywhere * no centralization * poor naming With this change we'll have: * 1 and only one key to disable via ctx: ``queue_job__no_delay`` * 1 and only one key to disable via os env: ``QUEUE_JOB__NO_DELAY`` * backward compatibility with deprecation for old keys --- queue_job/README.rst | 17 +++++----- queue_job/delay.py | 12 ++----- queue_job/models/base.py | 7 ++--- queue_job/readme/USAGE.rst | 14 ++++----- queue_job/static/description/index.html | 14 ++++----- queue_job/utils.py | 40 ++++++++++++++++++++++++ test_queue_job/tests/test_delay_mocks.py | 16 +++++----- 7 files changed, 75 insertions(+), 45 deletions(-) create mode 100644 queue_job/utils.py diff --git a/queue_job/README.rst b/queue_job/README.rst index 6ed473e48f..76b4446daf 100644 --- a/queue_job/README.rst +++ b/queue_job/README.rst @@ -453,13 +453,14 @@ Example: When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately. -To do so you can set `TEST_QUEUE_JOB_NO_DELAY=1` in your enviroment. +To do so you can set `QUEUE_JOB__NO_DELAY=1` in your enviroment. **Bypass jobs in tests** When writing tests on job-related methods is always tricky to deal with -delayed recordsets. 
To make your testing life easier -you can set `test_queue_job_no_delay=True` in the context. +delayed recordsets. To make your testing life easier, +or to run a delayed action immediately, +you can set `queue_job__no_delay=True` in the context. Tip: you can do this at test case level like this @@ -470,7 +471,7 @@ Tip: you can do this at test case level like this super().setUpClass() cls.env = cls.env(context=dict( cls.env.context, - test_queue_job_no_delay=True, # no jobs thanks + queue_job__no_delay=True, # no jobs thanks )) Then all your tests execute the job methods synchronously @@ -575,7 +576,7 @@ If you prefer, you can still test the whole thing in a single test, by calling When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately. -To do so you can set ``TEST_QUEUE_JOB_NO_DELAY=1`` in your environment. +To do so you can set ``QUEUE_JOB__NO_DELAY=1`` in your environment. .. WARNING:: Do not do this in production @@ -583,7 +584,7 @@ To do so you can set ``TEST_QUEUE_JOB_NO_DELAY=1`` in your environment. You should use ``trap_jobs``, really, but if for any reason you could not use it, and still need to have job methods executed synchronously in your tests, you can -do so by setting ``test_queue_job_no_delay=True`` in the context. +do so by setting ``queue_job__no_delay=True`` in the context. Tip: you can do this at test case level like this @@ -594,7 +595,7 @@ Tip: you can do this at test case level like this super().setUpClass() cls.env = cls.env(context=dict( cls.env.context, - test_queue_job_no_delay=True, # no jobs thanks + queue_job__no_delay=True, # no jobs thanks )) Then all your tests execute the job methods synchronously without delaying any @@ -604,7 +605,7 @@ In tests you'll have to mute the logger like: @mute_logger('odoo.addons.queue_job.models.base') -.. NOTE:: in graphs of jobs, the ``test_queue_job_no_delay`` context key must be in at +.. NOTE:: in graphs of jobs, the ``queue_job__no_delay`` context key must be in at least one job's env of the graph for the whole graph to be executed synchronously diff --git a/queue_job/delay.py b/queue_job/delay.py index 323749def2..e46e95aed9 100644 --- a/queue_job/delay.py +++ b/queue_job/delay.py @@ -4,11 +4,11 @@ import itertools import logging -import os import uuid from collections import defaultdict, deque from .job import Job +from .utils import must_run_without_delay _logger = logging.getLogger(__name__) @@ -217,17 +217,9 @@ def _has_to_execute_directly(self, vertices): In tests, prefer to use :func:`odoo.addons.queue_job.tests.common.trap_jobs`. """ - if os.getenv("TEST_QUEUE_JOB_NO_DELAY"): - _logger.warning( - "`TEST_QUEUE_JOB_NO_DELAY` env var found. NO JOB scheduled." - ) - return True envs = {vertex.recordset.env for vertex in vertices} for env in envs: - if env.context.get("test_queue_job_no_delay"): - _logger.warning( - "`test_queue_job_no_delay` ctx key found. NO JOB scheduled." 
- ) + if must_run_without_delay(env): return True return False diff --git a/queue_job/models/base.py b/queue_job/models/base.py index 16f106450a..4a6fa4753a 100644 --- a/queue_job/models/base.py +++ b/queue_job/models/base.py @@ -2,14 +2,12 @@ # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html) import functools -import logging from odoo import api, models from ..delay import Delayable from ..job import DelayableRecordset - -_logger = logging.getLogger(__name__) +from ..utils import must_run_without_delay class Base(models.AbstractModel): @@ -216,8 +214,7 @@ def auto_delay_wrapper(self, *args, **kwargs): if ( self.env.context.get("job_uuid") or not context_delay - or self.env.context.get("_job_force_sync") - or self.env.context.get("test_queue_job_no_delay") + or must_run_without_delay(self.env) ): # we are in the job execution return auto_delay_wrapper.origin(self, *args, **kwargs) diff --git a/queue_job/readme/USAGE.rst b/queue_job/readme/USAGE.rst index 66dd1744fd..296d3236cd 100644 --- a/queue_job/readme/USAGE.rst +++ b/queue_job/readme/USAGE.rst @@ -303,13 +303,13 @@ Example: When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately. -To do so you can set `TEST_QUEUE_JOB_NO_DELAY=1` in your enviroment. +To do so you can set `QUEUE_JOB__NO_DELAY=1` in your enviroment. **Bypass jobs in tests** When writing tests on job-related methods is always tricky to deal with delayed recordsets. To make your testing life easier -you can set `test_queue_job_no_delay=True` in the context. +you can set `queue_job__no_delay=True` in the context. Tip: you can do this at test case level like this @@ -320,7 +320,7 @@ Tip: you can do this at test case level like this super().setUpClass() cls.env = cls.env(context=dict( cls.env.context, - test_queue_job_no_delay=True, # no jobs thanks + queue_job__no_delay=True, # no jobs thanks )) Then all your tests execute the job methods synchronously @@ -425,7 +425,7 @@ If you prefer, you can still test the whole thing in a single test, by calling When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately. -To do so you can set ``TEST_QUEUE_JOB_NO_DELAY=1`` in your environment. +To do so you can set ``QUEUE_JOB__NO_DELAY=1`` in your environment. .. WARNING:: Do not do this in production @@ -433,7 +433,7 @@ To do so you can set ``TEST_QUEUE_JOB_NO_DELAY=1`` in your environment. You should use ``trap_jobs``, really, but if for any reason you could not use it, and still need to have job methods executed synchronously in your tests, you can -do so by setting ``test_queue_job_no_delay=True`` in the context. +do so by setting ``queue_job__no_delay=True`` in the context. Tip: you can do this at test case level like this @@ -444,7 +444,7 @@ Tip: you can do this at test case level like this super().setUpClass() cls.env = cls.env(context=dict( cls.env.context, - test_queue_job_no_delay=True, # no jobs thanks + queue_job__no_delay=True, # no jobs thanks )) Then all your tests execute the job methods synchronously without delaying any @@ -454,7 +454,7 @@ In tests you'll have to mute the logger like: @mute_logger('odoo.addons.queue_job.models.base') -.. NOTE:: in graphs of jobs, the ``test_queue_job_no_delay`` context key must be in at +.. 
NOTE:: in graphs of jobs, the ``queue_job__no_delay`` context key must be in at least one job's env of the graph for the whole graph to be executed synchronously diff --git a/queue_job/static/description/index.html b/queue_job/static/description/index.html index 7864577481..e4646f64fa 100644 --- a/queue_job/static/description/index.html +++ b/queue_job/static/description/index.html @@ -762,11 +762,11 @@

Configure default options for job

Bypass jobs on running Odoo

When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately.

-

To do so you can set TEST_QUEUE_JOB_NO_DELAY=1 in your enviroment.

+

To do so you can set QUEUE_JOB__NO_DELAY=1 in your enviroment.

Bypass jobs in tests

When writing tests on job-related methods is always tricky to deal with delayed recordsets. To make your testing life easier -you can set test_queue_job_no_delay=True in the context.

+you can set queue_job__no_delay=True in the context.

Tip: you can do this at test case level like this

 @classmethod
@@ -774,7 +774,7 @@ 

Configure default options for job
 super().setUpClass()
 cls.env = cls.env(context=dict(
     cls.env.context,
-    test_queue_job_no_delay=True, # no jobs thanks
+    queue_job__no_delay=True, # no jobs thanks
 ))

Then all your tests execute the job methods synchronously
@@ -870,7 +870,7 @@

Testing

Execute jobs synchronously when running Odoo

When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately.

-

To do so you can set TEST_QUEUE_JOB_NO_DELAY=1 in your environment.

+

To do so you can set QUEUE_JOB__NO_DELAY=1 in your environment.

Warning

Do not do this in production

@@ -878,7 +878,7 @@

Testing

Execute jobs synchronously in tests

You should use trap_jobs, really, but if for any reason you could not use it, and still need to have job methods executed synchronously in your tests, you can -do so by setting test_queue_job_no_delay=True in the context.

+do so by setting queue_job__no_delay=True in the context.

Tip: you can do this at test case level like this

 @classmethod
@@ -886,7 +886,7 @@ 

Testing

 super().setUpClass()
 cls.env = cls.env(context=dict(
     cls.env.context,
-    test_queue_job_no_delay=True, # no jobs thanks
+    queue_job__no_delay=True, # no jobs thanks
 ))

Then all your tests execute the job methods synchronously without delaying any
@@ -896,7 +896,7 @@

Testing

@mute_logger(‘odoo.addons.queue_job.models.base’)

Note

-

in graphs of jobs, the test_queue_job_no_delay context key must be in at
+

in graphs of jobs, the queue_job__no_delay context key must be in at least one job’s env of the graph for the whole graph to be executed synchronously
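Before the new helper's diff below, a hedged sketch of how call sites are expected to consult it, mirroring the ``delay.py`` and ``models/base.py`` changes above (``run_or_delay`` and its arguments are illustrative names, not part of the module):

.. code-block:: python

    from odoo.addons.queue_job.utils import must_run_without_delay

    def run_or_delay(records, method_name, *args, **kwargs):
        """Illustrative: run inline when a no-delay switch is set, else enqueue."""
        if must_run_without_delay(records.env):
            # QUEUE_JOB__NO_DELAY=1 in the environment or
            # queue_job__no_delay=True in the context: execute synchronously.
            return getattr(records, method_name)(*args, **kwargs)
        return getattr(records.with_delay(), method_name)(*args, **kwargs)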

diff --git a/queue_job/utils.py b/queue_job/utils.py
new file mode 100644
index 0000000000..5134cd1068
--- /dev/null
+++ b/queue_job/utils.py
@@ -0,0 +1,40 @@
+# Copyright 2023 Camptocamp
+# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
+
+import logging
+import os
+
+_logger = logging.getLogger(__name__)
+
+
+def must_run_without_delay(env):
+    """Return True if jobs have to run immediately.
+
+    :param env: `odoo.api.Environment` instance
+    """
+    # TODO: drop in v17
+    if os.getenv("TEST_QUEUE_JOB_NO_DELAY"):
+        _logger.warning(
+            "`TEST_QUEUE_JOB_NO_DELAY` env var found. NO JOB scheduled. "
+            "Note that this key is deprecated: please use `QUEUE_JOB__NO_DELAY`"
+        )
+        return True
+
+    if os.getenv("QUEUE_JOB__NO_DELAY"):
+        _logger.warning("`QUEUE_JOB__NO_DELAY` env var found. NO JOB scheduled.")
+        return True
+
+    # TODO: drop in v17
+    deprecated_keys = ("_job_force_sync", "test_queue_job_no_delay")
+    for key in deprecated_keys:
+        if env.context.get(key):
+            _logger.warning(
+                "`%s` ctx key found. NO JOB scheduled. "
+                "Note that this key is deprecated: please use `queue_job__no_delay`",
+                key,
+            )
+            return True
+
+    if env.context.get("queue_job__no_delay"):
+        _logger.warning("`queue_job__no_delay` ctx key found. NO JOB scheduled.")
+        return True
diff --git a/test_queue_job/tests/test_delay_mocks.py b/test_queue_job/tests/test_delay_mocks.py
index 3577495dfe..240fb474be 100644
--- a/test_queue_job/tests/test_delay_mocks.py
+++ b/test_queue_job/tests/test_delay_mocks.py
@@ -294,8 +294,8 @@ def test_mock_with_delay(self):
         self.assertEqual(delay_args, (1,))
         self.assertDictEqual(delay_kwargs, {"foo": 2})

-    @mute_logger("odoo.addons.queue_job.models.base")
-    @mock.patch.dict(os.environ, {"TEST_QUEUE_JOB_NO_DELAY": "1"})
+    @mute_logger("odoo.addons.queue_job.utils")
+    @mock.patch.dict(os.environ, {"QUEUE_JOB__NO_DELAY": "1"})
     def test_delay_graph_direct_exec_env_var(self):
         node = Delayable(self.env["test.queue.job"]).create_ir_logging(
             "test_delay_graph_direct_exec 1"
@@ -318,10 +318,10 @@ def test_delay_graph_direct_exec_env_var(self):
         self.assertEqual(logs[0].message, "test_delay_graph_direct_exec 2")
         self.assertEqual(logs[1].message, "test_delay_graph_direct_exec 1")

-    @mute_logger("odoo.addons.queue_job.models.base")
+    @mute_logger("odoo.addons.queue_job.utils")
     def test_delay_graph_direct_exec_context_key(self):
         node = Delayable(
-            self.env["test.queue.job"].with_context(test_queue_job_no_delay=True)
+            self.env["test.queue.job"].with_context(queue_job__no_delay=True)
         ).create_ir_logging("test_delay_graph_direct_exec 1")
         node2 = Delayable(self.env["test.queue.job"]).create_ir_logging(
             "test_delay_graph_direct_exec 2"
@@ -341,8 +341,8 @@ def test_delay_graph_direct_exec_context_key(self):
         self.assertEqual(logs[0].message, "test_delay_graph_direct_exec 2")
         self.assertEqual(logs[1].message, "test_delay_graph_direct_exec 1")

-    @mute_logger("odoo.addons.queue_job.models.base")
-    @mock.patch.dict(os.environ, {"TEST_QUEUE_JOB_NO_DELAY": "1"})
+    @mute_logger("odoo.addons.queue_job.utils")
+    @mock.patch.dict(os.environ, {"QUEUE_JOB__NO_DELAY": "1"})
     def test_delay_with_delay_direct_exec_env_var(self):
         model = self.env["test.queue.job"]
         model.with_delay().create_ir_logging("test_delay_graph_direct_exec 1")
@@ -357,9 +357,9 @@ def test_delay_with_delay_direct_exec_env_var(self):
         self.assertEqual(len(logs), 1)
         self.assertEqual(logs[0].message, "test_delay_graph_direct_exec 1")

-    @mute_logger("odoo.addons.queue_job.models.base")
+    @mute_logger("odoo.addons.queue_job.utils")
     def
test_delay_with_delay_direct_exec_context_key(self): - model = self.env["test.queue.job"].with_context(test_queue_job_no_delay=True) + model = self.env["test.queue.job"].with_context(queue_job__no_delay=True) model.with_delay().create_ir_logging("test_delay_graph_direct_exec 1") # jobs are executed directly logs = self.env["ir.logging"].search( From 01eed822bcebce069919ae74f64f8755100c5c02 Mon Sep 17 00:00:00 2001 From: Florent Xicluna Date: Mon, 16 Sep 2024 13:43:47 +0200 Subject: [PATCH 10/14] [FIX] queue_job: typo --- queue_job/README.rst | 2 +- queue_job/readme/USAGE.rst | 2 +- queue_job/static/description/index.html | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/queue_job/README.rst b/queue_job/README.rst index 76b4446daf..e64147594c 100644 --- a/queue_job/README.rst +++ b/queue_job/README.rst @@ -453,7 +453,7 @@ Example: When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately. -To do so you can set `QUEUE_JOB__NO_DELAY=1` in your enviroment. +To do so you can set `QUEUE_JOB__NO_DELAY=1` in your environment. **Bypass jobs in tests** diff --git a/queue_job/readme/USAGE.rst b/queue_job/readme/USAGE.rst index 296d3236cd..b7614d2ab4 100644 --- a/queue_job/readme/USAGE.rst +++ b/queue_job/readme/USAGE.rst @@ -303,7 +303,7 @@ Example: When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately. -To do so you can set `QUEUE_JOB__NO_DELAY=1` in your enviroment. +To do so you can set `QUEUE_JOB__NO_DELAY=1` in your environment. **Bypass jobs in tests** diff --git a/queue_job/static/description/index.html b/queue_job/static/description/index.html index e4646f64fa..575c659ace 100644 --- a/queue_job/static/description/index.html +++ b/queue_job/static/description/index.html @@ -762,7 +762,7 @@

Configure default options for job

Bypass jobs on running Odoo

When you are developing (ie: connector modules) you might want to bypass the queue job and run your code immediately.

-

To do so you can set QUEUE_JOB__NO_DELAY=1 in your enviroment.

+

To do so you can set QUEUE_JOB__NO_DELAY=1 in your environment.

Bypass jobs in tests

When writing tests on job-related methods is always tricky to deal with delayed recordsets. To make your testing life easier From e6d5e01277aa2634af63a1f4153d20545acd2add Mon Sep 17 00:00:00 2001 From: Florent Xicluna Date: Mon, 16 Sep 2024 13:44:12 +0200 Subject: [PATCH 11/14] [IMP] queue_job: add filter on Date Created --- queue_job/views/queue_job_views.xml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/queue_job/views/queue_job_views.xml b/queue_job/views/queue_job_views.xml index 51336f5197..c6e2481b71 100644 --- a/queue_job/views/queue_job_views.xml +++ b/queue_job/views/queue_job_views.xml @@ -249,6 +249,22 @@ string="Cancelled" domain="[('state', '=', 'cancelled')]" /> + + + + + From c376ef760e9162d5b850dce777f9e125848225a9 Mon Sep 17 00:00:00 2001 From: Florent Xicluna Date: Thu, 27 Jan 2022 11:15:55 +0100 Subject: [PATCH 12/14] [REF] remove explicit 'object' inheritance --- queue_job/delay.py | 2 +- queue_job/job.py | 2 +- queue_job/jobrunner/channels.py | 10 +++++----- queue_job/jobrunner/runner.py | 4 ++-- test_queue_job/tests/test_job.py | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/queue_job/delay.py b/queue_job/delay.py index e46e95aed9..726e850494 100644 --- a/queue_job/delay.py +++ b/queue_job/delay.py @@ -609,7 +609,7 @@ def _execute_direct(self): self._generated_job.perform() -class DelayableRecordset(object): +class DelayableRecordset: """Allow to delay a method for a recordset (shortcut way) Usage:: diff --git a/queue_job/job.py b/queue_job/job.py index 6cdb69b738..cd5a7a348a 100644 --- a/queue_job/job.py +++ b/queue_job/job.py @@ -105,7 +105,7 @@ def identity_exact_hasher(job_): @total_ordering -class Job(object): +class Job: """A Job is a task to execute. It is the in-memory representation of a job. Jobs are stored in the ``queue.job`` Odoo Model, but they are handled diff --git a/queue_job/jobrunner/channels.py b/queue_job/jobrunner/channels.py index 6812aa4960..468fb5760d 100644 --- a/queue_job/jobrunner/channels.py +++ b/queue_job/jobrunner/channels.py @@ -14,7 +14,7 @@ _logger = logging.getLogger(__name__) -class PriorityQueue(object): +class PriorityQueue: """A priority queue that supports removing arbitrary objects. Adding an object already in the queue is a no op. @@ -103,7 +103,7 @@ def pop(self): @total_ordering -class ChannelJob(object): +class ChannelJob: """A channel job is attached to a channel and holds the properties of a job that are necessary to prioritise them. @@ -205,7 +205,7 @@ def __lt__(self, other): return self.sorting_key() < other.sorting_key() -class ChannelQueue(object): +class ChannelQueue: """A channel queue is a priority queue for jobs. Jobs with an eta are set aside until their eta is past due, at @@ -334,7 +334,7 @@ def get_wakeup_time(self, wakeup_time=0): return wakeup_time -class Channel(object): +class Channel: """A channel for jobs, with a maximum capacity. 
When jobs are created by queue_job modules, they may be associated @@ -581,7 +581,7 @@ def split_strip(s, sep, maxsplit=-1): return [x.strip() for x in s.split(sep, maxsplit)] -class ChannelManager(object): +class ChannelManager: """High level interface for channels This class handles: diff --git a/queue_job/jobrunner/runner.py b/queue_job/jobrunner/runner.py index 25823a9973..025c228c62 100644 --- a/queue_job/jobrunner/runner.py +++ b/queue_job/jobrunner/runner.py @@ -259,7 +259,7 @@ def urlopen(): thread.start() -class Database(object): +class Database: def __init__(self, db_name): self.db_name = db_name connection_info = _connection_info_for(db_name) @@ -344,7 +344,7 @@ def set_job_enqueued(self, uuid): ) -class QueueJobRunner(object): +class QueueJobRunner: def __init__( self, scheme="http", diff --git a/test_queue_job/tests/test_job.py b/test_queue_job/tests/test_job.py index 502fafe26d..cde6f8c67c 100644 --- a/test_queue_job/tests/test_job.py +++ b/test_queue_job/tests/test_job.py @@ -89,7 +89,7 @@ def test_infinite_retryable_error(self): self.assertEqual(test_job.retry, 1) def test_on_instance_method(self): - class A(object): + class A: def method(self): pass From 88df44d4b1ef8854b9f1ffd85f16bb838459f0df Mon Sep 17 00:00:00 2001 From: Florent Xicluna Date: Fri, 27 Dec 2024 11:10:50 +0100 Subject: [PATCH 13/14] [REF] remove explicit super() arguments --- base_export_async/tests/test_base_export_async.py | 2 +- queue_job/tests/common.py | 2 +- queue_job_subscribe/tests/test_job_subscribe.py | 2 +- test_queue_job/models/test_models.py | 2 +- test_queue_job/tests/test_job.py | 2 +- test_queue_job/tests/test_job_function.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/base_export_async/tests/test_base_export_async.py b/base_export_async/tests/test_base_export_async.py index c6909ebbd0..be6a26c61b 100644 --- a/base_export_async/tests/test_base_export_async.py +++ b/base_export_async/tests/test_base_export_async.py @@ -42,7 +42,7 @@ class TestBaseExportAsync(common.TransactionCase): def setUp(self): - super(TestBaseExportAsync, self).setUp() + super().setUp() self.delay_export_obj = self.env["delay.export"] self.job_obj = self.env["queue.job"] _request_stack.push( diff --git a/queue_job/tests/common.py b/queue_job/tests/common.py index 6bbc5be9e4..7afb1a2c9f 100644 --- a/queue_job/tests/common.py +++ b/queue_job/tests/common.py @@ -427,7 +427,7 @@ def __init__( def setUp(self): """Log an extra statement which test is started.""" - super(OdooDocTestCase, self).setUp() + super().setUp() logging.getLogger(__name__).info("Running tests for %s", self._dt_test.name) diff --git a/queue_job_subscribe/tests/test_job_subscribe.py b/queue_job_subscribe/tests/test_job_subscribe.py index 0f1fcddf48..935f15f74a 100644 --- a/queue_job_subscribe/tests/test_job_subscribe.py +++ b/queue_job_subscribe/tests/test_job_subscribe.py @@ -8,7 +8,7 @@ class TestJobSubscribe(common.TransactionCase): def setUp(self): - super(TestJobSubscribe, self).setUp() + super().setUp() grp_queue_job_manager = self.ref("queue_job.group_queue_job_manager") self.other_partner_a = self.env["res.partner"].create( {"name": "My Company a", "is_company": True, "email": "test@tes.ttest"} diff --git a/test_queue_job/models/test_models.py b/test_queue_job/models/test_models.py index 573e2380a9..03fa792137 100644 --- a/test_queue_job/models/test_models.py +++ b/test_queue_job/models/test_models.py @@ -76,7 +76,7 @@ def job_with_retry_pattern__no_zero(self): return def mapped(self, func): - return 
super(ModelTestQueueJob, self).mapped(func) + return super().mapped(func) def job_alter_mutable(self, mutable_arg, mutable_kwarg=None): mutable_arg.append(2) diff --git a/test_queue_job/tests/test_job.py b/test_queue_job/tests/test_job.py index cde6f8c67c..ad1d193c44 100644 --- a/test_queue_job/tests/test_job.py +++ b/test_queue_job/tests/test_job.py @@ -650,7 +650,7 @@ class TestJobStorageMultiCompany(common.TransactionCase): """Test storage of jobs""" def setUp(self): - super(TestJobStorageMultiCompany, self).setUp() + super().setUp() self.queue_job = self.env["queue.job"] grp_queue_job_manager = self.ref("queue_job.group_queue_job_manager") User = self.env["res.users"] diff --git a/test_queue_job/tests/test_job_function.py b/test_queue_job/tests/test_job_function.py index 17781ac475..320b4973c5 100644 --- a/test_queue_job/tests/test_job_function.py +++ b/test_queue_job/tests/test_job_function.py @@ -4,7 +4,7 @@ class TestJobFunction(common.TransactionCase): def setUp(self): - super(TestJobFunction, self).setUp() + super().setUp() self.test_function_model = self.env.ref( "queue_job.job_function_queue_job__test_job" ) From c223425e1554e4ed32358a07592eef1238e59e41 Mon Sep 17 00:00:00 2001 From: Lois Rilo Date: Fri, 11 Jun 2021 16:11:14 +0200 Subject: [PATCH 14/14] [13.0][FIX] queue_job_cron: channel_id must be storable. Otherwise, you cannot use any channel other than default ( root.ir_cron) --- queue_job_cron/models/ir_cron.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/queue_job_cron/models/ir_cron.py b/queue_job_cron/models/ir_cron.py index 7e4f5b848d..bb09ed075e 100644 --- a/queue_job_cron/models/ir_cron.py +++ b/queue_job_cron/models/ir_cron.py @@ -28,13 +28,16 @@ class IrCron(models.Model): comodel_name="queue.job.channel", compute="_compute_run_as_queue_job", readonly=False, + store=True, string="Channel", ) @api.depends("run_as_queue_job") def _compute_run_as_queue_job(self): for cron in self: - if cron.run_as_queue_job and not cron.channel_id: + if cron.channel_id: + continue + if cron.run_as_queue_job: cron.channel_id = self.env.ref("queue_job_cron.channel_root_ir_cron").id else: cron.channel_id = False
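To illustrate the effect of this last fix, a hedged sketch (the channel name and the ``env`` handle are assumptions for the example): with ``store=True``, a manually chosen channel now persists instead of being reset to the default ``root.ir_cron`` channel by the compute method, which skips crons that already have a ``channel_id``.

.. code-block:: python

    # Illustrative only: a non-default channel set on a cron run as a queue
    # job is now stored and kept across recomputations.
    channel = env["queue.job.channel"].search([("name", "=", "my_channel")], limit=1)
    cron = env.ref("queue_job.ir_cron_autovacuum_queue_jobs")
    cron.write({"run_as_queue_job": True, "channel_id": channel.id})
    assert cron.channel_id == channel  # no longer reset to root.ir_cron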