commit c6a7333909
@@ -3,9 +3,11 @@
 import logging
 
 import psycopg2
+from psycopg2 import OperationalError
 
 from odoo import _, api, fields, models
 from odoo.exceptions import UserError
+from odoo.service.model import PG_CONCURRENCY_ERRORS_TO_RETRY
 
 from odoo.addons.queue_job.exception import RetryableJobError
 
@@ -115,18 +117,38 @@ class AttachmentQueue(models.Model):
                 seconds=DEFAULT_ETA_FOR_RETRY,
                 ignore_retry=True,
             ) from exc
-        if self.state == "pending":
+        if self.state != "done":
             try:
                 with self.env.cr.savepoint():
                     self.run()
-            except Exception as e:
-                _logger.warning(STR_ERROR_DURING_PROCESSING.format(self.id) + str(e))
-                self.write({"state": "failed", "state_message": str(e)})
-                emails = self.failure_emails
-                if emails:
-                    self.env.ref(
-                        "attachment_queue.attachment_failure_notification"
-                    ).send_mail(self.id)
+            except OperationalError as err:
+                # re-raise typical transaction serialization error so queue job retries
+                # no need to set attachment as failed since it will be retried.
+                if err.pgcode in PG_CONCURRENCY_ERRORS_TO_RETRY:
+                    raise
+                self._set_attachment_failure(err)
+            except RetryableJobError as err:
+                # re-raise Retryable Error to keep the functionality
+                # we still set the attachment as failed here because it may not be
+                # retried (in case max_retries is reached). We would not want a
+                # pending attachment with no related pending job
+                self._set_attachment_failure(err)
+                # a rollback has been made before because of the savepoint.
+                # we need to commit because we re-raise the exception and a rollback
+                # will be performed
+                self.env.cr.commit()  # pylint: disable=E8102
+                raise
+            except Exception as err:
+                self._set_attachment_failure(err)
+
+    def _set_attachment_failure(self, error):
+        _logger.warning(STR_ERROR_DURING_PROCESSING.format(self.id) + str(error))
+        self.write({"state": "failed", "state_message": str(error)})
+        emails = self.failure_emails
+        if emails:
+            self.env.ref("attachment_queue.attachment_failure_notification").send_mail(
+                self.id
+            )
 
     def run(self):
         """
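For context, a minimal standalone sketch (no Odoo) of the retry-vs-fail decision introduced above: a concurrency error whose pgcode is in the retry list is re-raised so the job framework retries the job, while any other error marks the record as failed. FakeOperationalError, process and the literal SQLSTATE codes are illustrative stand-ins; the real code uses psycopg2.OperationalError and Odoo's PG_CONCURRENCY_ERRORS_TO_RETRY tuple from odoo.service.model.

# Assumed retry codes: serialization failure, deadlock, lock not available.
PG_CONCURRENCY_ERRORS_TO_RETRY = ("40001", "40P01", "55P03")


class FakeOperationalError(Exception):
    """Minimal stand-in for psycopg2.OperationalError carrying a pgcode."""

    def __init__(self, pgcode, message=""):
        super().__init__(message or pgcode)
        self.pgcode = pgcode


def process(record, run):
    """Run the job body and decide between retry (re-raise) and failure."""
    try:
        run()
    except FakeOperationalError as err:
        if err.pgcode in PG_CONCURRENCY_ERRORS_TO_RETRY:
            # transient concurrency error: re-raise so the job is retried,
            # without marking the record as failed
            raise
        record.update(state="failed", state_message=str(err))
    except Exception as err:
        # any other error marks the record as failed
        record.update(state="failed", state_message=str(err))
    else:
        record["state"] = "done"
    return record


if __name__ == "__main__":
    def ok():
        pass

    def broken():
        raise ValueError("boom")

    print(process({}, ok))      # {'state': 'done'}
    print(process({}, broken))  # {'state': 'failed', 'state_message': 'boom'}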
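The "commit before re-raise" detail in the RetryableJobError branch can be illustrated with a small sqlite3 sketch: the failure state is written and committed explicitly because the re-raised exception ends in a rollback, which must not wipe that write. The table, column and function names here are hypothetical, not the module's schema.

import sqlite3


class RetryLater(Exception):
    """Stand-in for a retryable job error."""


def mark_failed_then_reraise(conn, attachment_id, err):
    conn.execute(
        "UPDATE attachment_queue SET state='failed', state_message=? WHERE id=?",
        (str(err), attachment_id),
    )
    # persist the failure before re-raising: the rollback triggered afterwards
    # must not discard the 'failed' state
    conn.commit()
    raise RetryLater(str(err)) from err


if __name__ == "__main__":
    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE attachment_queue (id INTEGER, state TEXT, state_message TEXT)"
    )
    conn.execute("INSERT INTO attachment_queue VALUES (1, 'pending', NULL)")
    conn.commit()
    try:
        mark_failed_then_reraise(conn, 1, TimeoutError("backend busy"))
    except RetryLater:
        conn.rollback()  # what the job framework does after the re-raise
    print(conn.execute("SELECT state FROM attachment_queue WHERE id=1").fetchone())
    # -> ('failed',)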
@@ -5,6 +5,8 @@ import base64
 
 from odoo import api, fields, models
 
+from odoo.addons.queue_job.exception import RetryableJobError
+
 
 class AttachmentQueue(models.Model):
     _inherit = "attachment.queue"
@@ -30,17 +32,26 @@ class AttachmentQueue(models.Model):
     def _run(self):
         res = super()._run()
         if self.file_type == "export":
-            fs = self.fs_storage_id.fs
-            folder_path = self.task_id.filepath
-            full_path = (
-                folder_path and fs.sep.join([folder_path, self.name]) or self.name
-            )
-            # create missing folders if necessary :
-            if folder_path and not fs.exists(folder_path):
-                fs.makedirs(folder_path)
-            self._write_file_to_remote(fs, full_path)
+            try:
+                fs = self.fs_storage_id.fs
+                folder_path = self.task_id.filepath
+                full_path = (
+                    folder_path and fs.sep.join([folder_path, self.name]) or self.name
+                )
+                # create missing folders if necessary :
+                if folder_path and not fs.exists(folder_path):
+                    fs.makedirs(folder_path)
+                self._write_file_to_remote(fs, full_path)
+            except TimeoutError as err:
+                raise RetryableJobError(
+                    str(err),
+                    seconds=self._timeout_retry_seconds(),
+                ) from err
         return res
 
+    def _timeout_retry_seconds(self):
+        return 60 * 60 * 4
+
     def _get_failure_emails(self):
         res = super()._get_failure_emails()
         if self.task_id.failure_emails:
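A standalone sketch of the export step above, assuming an fsspec-style filesystem handle like the one fs_storage exposes through its fs attribute: build the remote path with the filesystem's separator, create missing folders, write the payload, and turn a TimeoutError into a "retry later" signal with a four-hour delay. RetryableJobSignal and export_to_remote are hypothetical names for illustration only.

import fsspec


class RetryableJobSignal(Exception):
    """Stand-in for queue_job's RetryableJobError (carries a retry delay)."""

    def __init__(self, message, seconds):
        super().__init__(message)
        self.seconds = seconds


def export_to_remote(fs, folder_path, name, payload, retry_seconds=60 * 60 * 4):
    # join with the filesystem's separator only when a folder is configured
    full_path = fs.sep.join([folder_path, name]) if folder_path else name
    try:
        # create missing folders if necessary
        if folder_path and not fs.exists(folder_path):
            fs.makedirs(folder_path)
        with fs.open(full_path, "wb") as remote_file:
            remote_file.write(payload)
    except TimeoutError as err:
        # slow or unreachable backend: ask the job runner to retry later
        raise RetryableJobSignal(str(err), seconds=retry_seconds) from err
    return full_path


if __name__ == "__main__":
    memfs = fsspec.filesystem("memory")
    path = export_to_remote(memfs, "outbox/today", "report.csv", b"a,b\n1,2\n")
    print(path, memfs.cat(path))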