Wipe addons/: full reset for clean re-upload
This commit is contained in:
@@ -1,3 +0,0 @@
|
||||
from . import test_command
|
||||
from . import test_command_log
|
||||
from . import test_file
|
||||
@@ -1,145 +0,0 @@
|
||||
from datetime import timedelta
|
||||
from unittest.mock import patch
|
||||
|
||||
from odoo.fields import Datetime
|
||||
from odoo.tools import mute_logger
|
||||
|
||||
from odoo.addons.cetmix_tower_server.tests.common import TestTowerCommon
|
||||
|
||||
|
||||
class TestTowerCommand(TestTowerCommon):
    """Test suite for verifying zombie command detection and related
    queue job cancellation.

    Tests in this class verify that commands which have been running
    longer than the timeout are properly detected as zombies, and their
    associated queue jobs are cancelled.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Configure a short command timeout (10 seconds) for these tests
        cls.env["ir.config_parameter"].sudo().set_param(
            "cetmix_tower_server.command_timeout", "10"
        )
        # A start date 20 seconds in the past (beyond the timeout above)
        # lets us simulate a command that has been "running" too long
        cls.old_time = Datetime.now() - timedelta(seconds=20)

    def _patch_command_runner(self, command_type, runner_method):
        """Helper to patch a command runner to simulate a zombie command.

        Args:
            command_type: Type of command runner to patch ('ssh' or 'python_code')
            runner_method: Original method to wrap

        Returns:
            A context manager that applies the patch
        """

        def _patched_runner(*call_args, **call_kwargs):
            # Force the log record positional argument to False so the
            # runner never closes the log record ("zombie" behaviour)
            mutable_args = list(call_args)
            if len(mutable_args) > 1:
                mutable_args[1] = False  # Set log_record to False
            return runner_method(*mutable_args, **call_kwargs)

        return patch.object(
            self.registry["cx.tower.server"],
            f"_command_runner_{command_type}",
            _patched_runner,
        )

    def _verify_zombie_command_job_cancellation(self, command_action):
        """Verify zombie command is detected and job is cancelled.

        Args:
            command_action: Action type ('ssh_command' or 'python_code')
        """
        # Locate the log record left behind by the simulated zombie command
        zombie_logs = self.env["cx.tower.command.log"].search(
            [
                ("is_running", "=", True),
                ("start_date", "=", self.old_time),
                ("command_action", "=", command_action),
            ]
        )

        self.assertEqual(
            len(zombie_logs), 1, "Zombie command log should be created"
        )
        self.assertTrue(
            zombie_logs.queue_job_id,
            "Zombie command log should have queue job",
        )

        queue_job = zombie_logs.queue_job_id
        self.assertTrue(queue_job.exists(), "Zombie command job should exist")

        self.assertEqual(queue_job.state, "pending", "Zombie command job should be pending")

        # Trigger the zombie-command cleanup process
        self.server_test_1._check_zombie_commands()

        # The associated queue job must now be cancelled
        self.assertEqual(
            queue_job.state, "cancelled", "Zombie command job should be cancelled"
        )

    def test_check_zombie_ssh_command_queue(self):
        """
        Test that zombie ssh command is killed and job is cancelled
        """
        # Create test commands
        ssh_command = self.Command.create(
            {
                "name": "Test SSH Command",
                "code": "ls -la",
                "action": "ssh_command",
            }
        )

        # Patch the runner so the log record never gets finished
        server_model = self.registry["cx.tower.server"]
        original_ssh_runner = server_model._command_runner_ssh

        with self._patch_command_runner("ssh", original_ssh_runner):
            # Run the command with a log record dated in the past
            self.server_test_1.run_command(
                ssh_command, log={"start_date": self.old_time}
            )

        # Verify zombie detection and job cancellation
        self._verify_zombie_command_job_cancellation("ssh_command")

    @mute_logger("py.warnings")
    def test_check_zombie_python_command_queue(self):
        """
        Test that zombie python command is killed and job is cancelled
        """
        # Create test commands
        python_command = self.Command.create(
            {
                "name": "Test Python Command",
                "code": "print('test')",
                "action": "python_code",
            }
        )

        # Patch the runner so the log record never gets finished
        server_model = self.registry["cx.tower.server"]
        original_python_runner = server_model._command_runner_python_code

        with self._patch_command_runner(
            "python_code", original_python_runner
        ):
            # Run the command with a log record dated in the past
            self.server_test_1.run_command(
                python_command, log={"start_date": self.old_time}
            )

        # Verify zombie detection and job cancellation
        self._verify_zombie_command_job_cancellation("python_code")
|
||||
@@ -1,37 +0,0 @@
|
||||
from odoo.addons.cetmix_tower_server.tests.common import TestTowerCommon
|
||||
from odoo.addons.queue_job.job import Job
|
||||
|
||||
|
||||
class TestTowerCommand(TestTowerCommon):
    """
    Test cases for command log state on queue_job failure
    """

    def test_command_log_state_on_job_fail(self):
        """Command log must report the queue-job error status when its job fails."""
        command_rec = self.env["cx.tower.command"].create(
            {
                "name": "Test Command",
                "action": "ssh_command",
                "code": "echo 'Hello World'",
            }
        )
        self.assertTrue(command_rec.id, "Command should be created successfully")

        # Run the command; this enqueues a queue job and creates a log record
        self.server_test_1.run_command(command=command_rec)
        log_rec = self.env["cx.tower.command.log"].search(
            [("command_id", "=", command_rec.id)], order="id desc", limit=1
        )
        self.assertTrue(log_rec, "Command log should be created")

        job_rec = log_rec.queue_job_id
        self.assertTrue(job_rec, "Queue job should be associated with command log")

        # Force the job into the failed state via the queue_job API
        job_wrapper = Job.load(self.env, job_rec.uuid)
        job_wrapper.set_failed()
        job_wrapper.store()
        self.assertEqual(job_rec.state, "failed", "Job should be in failed state")
        # The log's status must mirror the job failure
        self.assertEqual(
            log_rec.command_status,
            self.env["queue.job"].QUEUE_JOB_ERROR,
            "Command log should be in failed state",
        )
|
||||
@@ -1,201 +0,0 @@
|
||||
from odoo import exceptions
|
||||
|
||||
from odoo.addons.cetmix_tower_server.tests.common import TestTowerCommon
|
||||
from odoo.addons.queue_job.tests.common import trap_jobs
|
||||
|
||||
|
||||
class TestCxTowerFileQueue(TestTowerCommon):
    """Tests for asynchronous (queue_job based) file upload/download."""

    def setUp(self):
        super().setUp()
        # Template shared by all tower-sourced files in these tests
        self.file_template = self.FileTemplate.create(
            {
                "name": "Test",
                "file_name": "test.txt",
                "server_dir": "/var/tmp",
                "code": "Hello, world!",
            }
        )

    def test_async_upload_operations(self):
        """Test that upload operations are processed asynchronously"""
        # Create unique files specifically for this test
        file_tower = self.File.create(
            {
                "source": "tower",
                "template_id": self.file_template.id,
                "server_id": self.server_test_1.id,
                "name": "upload_test_1",
                "auto_sync": False,
            }
        )

        file_server = self.File.create(
            {
                "name": "upload_test_2",
                "source": "server",
                "server_id": self.server_test_1.id,
                "server_dir": "/var/tmp",
                "auto_sync": False,
            }
        )

        with trap_jobs() as trap:
            file_tower.upload()
            file_server.upload()

            # One job per file must have been enqueued
            self.assertEqual(len(trap.enqueued_jobs), 2)

        # Simulate successful job execution
        file_tower.write({"server_response": "ok", "is_being_processed": False})
        file_server.write({"server_response": "ok", "is_being_processed": False})

        # Refresh records to get updated values
        file_tower.invalidate_recordset()
        file_server.invalidate_recordset()

        # Verify the expected state
        self.assertEqual(file_tower.server_response, "ok")
        self.assertFalse(file_tower.is_being_processed)

        self.assertEqual(file_server.server_response, "ok")
        self.assertFalse(file_server.is_being_processed)

    def test_async_download_operations(self):
        """Test that download operations are processed asynchronously"""
        # Create unique files specifically for this test
        dl_file_tower = self.File.create(
            {
                "source": "tower",
                "template_id": self.file_template.id,
                "server_id": self.server_test_1.id,
                "name": "download_test_1",
                "auto_sync": False,
            }
        )

        dl_file_server = self.File.create(
            {
                "name": "download_test_2",
                "source": "server",
                "server_id": self.server_test_1.id,
                "server_dir": "/var/tmp",
                "auto_sync": False,
            }
        )

        with trap_jobs() as trap:
            dl_file_tower.download()
            dl_file_server.download()

            # Verify jobs were created
            self.assertEqual(len(trap.enqueued_jobs), 2)

        # Simulate successful job execution
        dl_file_tower.write({"server_response": "ok", "is_being_processed": False})
        dl_file_server.write(
            {"server_response": "ok", "is_being_processed": False}
        )

        # Refresh records to get updated values
        dl_file_tower.invalidate_recordset()
        dl_file_server.invalidate_recordset()

        # Verify the expected state
        self.assertEqual(dl_file_tower.server_response, "ok")
        self.assertFalse(dl_file_tower.is_being_processed)

        self.assertEqual(dl_file_server.server_response, "ok")
        self.assertFalse(dl_file_server.is_being_processed)

    def test_upload_error_handling(self):
        """Test error handling in async upload operations"""
        failing_file = self.File.create(
            {
                "source": "tower",
                "template_id": self.file_template.id,
                "server_id": self.server_test_1.id,
                "name": "error_handling_test",
                "auto_sync": False,
            }
        )

        # Set context to force the mock in ssh_upload_file to raise error
        error_context = {"raise_upload_error": "Forced upload error"}

        with trap_jobs() as trap:
            # This will trigger job creation but the job would fail if executed
            failing_file.with_context(**error_context).upload(raise_error=True)

            # Verify job was created
            self.assertEqual(len(trap.enqueued_jobs), 1)

        # Simulate what would happen if the job executed and failed
        failing_file.write({"server_response": "error", "is_being_processed": False})
        failing_file.invalidate_recordset()

        self.assertEqual(failing_file.server_response, "error")
        self.assertFalse(failing_file.is_being_processed)

    def test_download_error_handling(self):
        """Test error handling in async download operations"""
        failing_file = self.File.create(
            {
                "source": "server",
                "server_id": self.server_test_1.id,
                "server_dir": "/var/tmp",
                "name": "download_error_test",
            }
        )

        # Set context to force the mock in ssh_download_file to raise error
        error_context = {"raise_download_error": "Forced download error"}

        with trap_jobs() as trap:
            # This will trigger job creation but the job would fail if executed
            failing_file.with_context(**error_context).download(raise_error=True)

            # Verify job was created
            self.assertEqual(len(trap.enqueued_jobs), 1)

        # Simulate what would happen if the job executed and failed
        failing_file.write({"server_response": "error", "is_being_processed": False})
        failing_file.invalidate_recordset()

        self.assertEqual(failing_file.server_response, "error")
        self.assertFalse(failing_file.is_being_processed)

    def test_already_processing_check(self):
        """Test that files being processed cannot be processed again"""
        busy_file = self.File.create(
            {
                "source": "tower",
                "template_id": self.file_template.id,
                "server_id": self.server_test_1.id,
                "name": "processing_test_file",
                "is_being_processed": True,
            }
        )

        self.assertTrue(busy_file.is_being_processed)

        # Test with raising error
        with self.assertRaises(exceptions.UserError):
            busy_file.upload(raise_error=True)

        # Test without raising error - should not create job
        with trap_jobs() as trap:
            busy_file.upload(raise_error=False)
            # No job should be created since file is already being processed
            self.assertEqual(len(trap.enqueued_jobs), 0)

        # Verify still marked as processing
        self.assertTrue(busy_file.is_being_processed)

        # Same tests for download
        with self.assertRaises(exceptions.UserError):
            busy_file.download(raise_error=True)

        with trap_jobs() as trap:
            busy_file.download(raise_error=False)
            # No job should be created
            self.assertEqual(len(trap.enqueued_jobs), 0)

        self.assertTrue(busy_file.is_being_processed)
|
||||
Reference in New Issue
Block a user