From 93c82dadf74139deeadcd0c789790475a0b182cb Mon Sep 17 00:00:00 2001
From: Yannick Payot
Date: Wed, 16 Aug 2023 19:47:27 +0200
Subject: [PATCH] [IMP] queue_job_batch: Improve perf of counters

In the case of a batch of 2k jobs with large task definitions, a memory
error was easily reached. Using read() to fetch only the state field
saves memory by avoiding the prefetching of the other job fields.
---
 queue_job_batch/models/queue_job_batch.py | 26 +++++++++++------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/queue_job_batch/models/queue_job_batch.py b/queue_job_batch/models/queue_job_batch.py
index 730a3b7e78..d967231349 100644
--- a/queue_job_batch/models/queue_job_batch.py
+++ b/queue_job_batch/models/queue_job_batch.py
@@ -116,18 +116,18 @@ def get_new_batch(self, name, **kwargs):
         })
         return self.sudo().create(vals).sudo(self.env.uid)
 
-    @api.depends('job_ids')
+    @api.depends("job_ids", "job_ids.state")
     def _compute_job_count(self):
         for record in self:
-            job_count = len(record.job_ids)
-            failed_job_count = len(record.job_ids.filtered(
-                lambda r: r.state == 'failed'
-            ))
-            done_job_count = len(record.job_ids.filtered(
-                lambda r: r.state == 'done'
-            ))
-            record.job_count = job_count
-            record.finished_job_count = done_job_count
-            record.failed_job_count = failed_job_count
-            record.completeness = done_job_count / max(1, job_count)
-            record.failed_percentage = failed_job_count / max(1, job_count)
+            jobs = record.job_ids
+            states = [r["state"] for r in jobs.read(["state"])]
+
+            total = len(jobs)
+            failed = states.count("failed")
+            done = states.count("done")
+
+            record.job_count = total
+            record.finished_job_count = done
+            record.failed_job_count = failed
+            record.completeness = done / max(1, total)
+            record.failed_percentage = failed / max(1, total)
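
Note (illustration only, not part of the patch): filtered() with a lambda
that reads r.state materializes each record in the ORM cache and, with
standard Odoo prefetching, pulls in the other stored simple fields as
well, including the large task definition. read(["state"]) instead issues
a single query for just that column. A minimal sketch of the two access
patterns, assuming `jobs` is a queue.job recordset taken from a batch:

    # Accessing r.state inside the lambda loads each record into the ORM
    # cache and prefetches its other stored fields, so the large task
    # payloads end up in memory too:
    failed = len(jobs.filtered(lambda r: r.state == "failed"))

    # read() fetches only the requested field (plus the id), so the large
    # payload columns never leave the database; counting then happens on
    # plain Python values:
    states = [row["state"] for row in jobs.read(["state"])]
    failed = states.count("failed")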