File fix-the-regression-in-schedule-module-releasded-in-3.patch of Package salt.28025 (project openSUSE:Leap:15.4:ARM)
From 7803275a8aaeedf2124706f51b6a54cfcfb2d032 Mon Sep 17 00:00:00 2001
From: Victor Zhestkov <Victor.Zhestkov@suse.com>
Date: Thu, 1 Sep 2022 14:45:13 +0300
Subject: [PATCH] Fix the regression in schedule module releasded in 3004
 (bsc#1202631)

Co-authored-by: Gareth J. Greenaway <gareth@saltstack.com>
---
 changelog/61324.changed                     |   1 +
 salt/modules/schedule.py                    | 449 ++++++++++++++------
 tests/pytests/unit/modules/test_schedule.py | 138 +++++-
 3 files changed, 442 insertions(+), 146 deletions(-)
 create mode 100644 changelog/61324.changed

diff --git a/changelog/61324.changed b/changelog/61324.changed
new file mode 100644
index 0000000000..d67051a8da
--- /dev/null
+++ b/changelog/61324.changed
@@ -0,0 +1 @@
+Adding the ability to add, delete, purge, and modify Salt scheduler jobs when the Salt minion is not running.
diff --git a/salt/modules/schedule.py b/salt/modules/schedule.py
index bcd64f2851..913a101ea6 100644
--- a/salt/modules/schedule.py
+++ b/salt/modules/schedule.py
@@ -15,6 +15,7 @@ import salt.utils.event
 import salt.utils.files
 import salt.utils.odict
 import salt.utils.yaml
+import yaml
 
 try:
     import dateutil.parser as dateutil_parser
@@ -64,7 +65,35 @@ SCHEDULE_CONF = [
 ]
 
 
-def list_(show_all=False, show_disabled=True, where=None, return_yaml=True):
+def _get_schedule_config_file():
+    """
+    Return the minion schedule configuration file
+    """
+    config_dir = __opts__.get("conf_dir", None)
+    if config_dir is None and "conf_file" in __opts__:
+        config_dir = os.path.dirname(__opts__["conf_file"])
+    if config_dir is None:
+        config_dir = salt.syspaths.CONFIG_DIR
+
+    minion_d_dir = os.path.join(
+        config_dir,
+        os.path.dirname(
+            __opts__.get(
+                "default_include",
+                salt.config.DEFAULT_MINION_OPTS["default_include"],
+            )
+        ),
+    )
+
+    if not os.path.isdir(minion_d_dir):
+        os.makedirs(minion_d_dir)
+
+    return os.path.join(minion_d_dir, "_schedule.conf")
+
+
+def list_(
+    show_all=False, show_disabled=True, where=None, return_yaml=True, offline=False
+):
     """
     List the jobs currently scheduled on the minion
 
@@ -83,24 +112,33 @@ def list_(show_all=False, show_disabled=True, where=None, return_yaml=True):
     """
 
     schedule = {}
-    try:
-        with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
-            res = __salt__["event.fire"](
-                {"func": "list", "where": where}, "manage_schedule"
-            )
-            if res:
-                event_ret = event_bus.get_event(
-                    tag="/salt/minion/minion_schedule_list_complete", wait=30
+    if offline:
+        schedule_config = _get_schedule_config_file()
+        if os.path.exists(schedule_config):
+            with salt.utils.files.fopen(schedule_config) as fp_:
+                schedule_yaml = fp_.read()
+                if schedule_yaml:
+                    schedule_contents = yaml.safe_load(schedule_yaml)
+                    schedule = schedule_contents.get("schedule", {})
+    else:
+        try:
+            with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
+                res = __salt__["event.fire"](
+                    {"func": "list", "where": where}, "manage_schedule"
                 )
-                if event_ret and event_ret["complete"]:
-                    schedule = event_ret["schedule"]
-    except KeyError:
-        # Effectively a no-op, since we can't really return without an event system
-        ret = {}
-        ret["comment"] = "Event module not available. Schedule list failed."
-        ret["result"] = True
-        log.debug("Event module not available. Schedule list failed.")
-        return ret
+                if res:
+                    event_ret = event_bus.get_event(
+                        tag="/salt/minion/minion_schedule_list_complete", wait=30
+                    )
+                    if event_ret and event_ret["complete"]:
+                        schedule = event_ret["schedule"]
+        except KeyError:
+            # Effectively a no-op, since we can't really return without an event system
+            ret = {}
+            ret["comment"] = "Event module not available. Schedule list failed."
+            ret["result"] = True
+            log.debug("Event module not available. Schedule list failed.")
+            return ret
 
     _hidden = ["enabled", "skip_function", "skip_during_range"]
     for job in list(schedule.keys()):  # iterate over a copy since we will mutate it
@@ -139,14 +177,11 @@ def list_(show_all=False, show_disabled=True, where=None, return_yaml=True):
             # remove _seconds from the listing
             del schedule[job]["_seconds"]
 
-    if schedule:
-        if return_yaml:
-            tmp = {"schedule": schedule}
-            return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
-        else:
-            return schedule
+    if return_yaml:
+        tmp = {"schedule": schedule}
+        return salt.utils.yaml.safe_dump(tmp, default_flow_style=False)
     else:
-        return {"schedule": {}}
+        return schedule
 
 
 def is_enabled(name=None):
@@ -186,11 +221,18 @@ def purge(**kwargs):
 
     .. code-block:: bash
 
         salt '*' schedule.purge
+
+        # Purge jobs on Salt minion
+        salt '*' schedule.purge
+
     """
-    ret = {"comment": [], "result": True}
+    ret = {"comment": [], "changes": {}, "result": True}
 
-    for name in list_(show_all=True, return_yaml=False):
+    current_schedule = list_(
+        show_all=True, return_yaml=False, offline=kwargs.get("offline")
+    )
+
+    for name in pycopy.deepcopy(current_schedule):
         if name == "enabled":
             continue
         if name.startswith("__"):
@@ -202,37 +244,65 @@ def purge(**kwargs):
                 "Job: {} would be deleted from schedule.".format(name)
             )
         else:
-            persist = kwargs.get("persist", True)
+            if kwargs.get("offline"):
+                del current_schedule[name]
 
-            try:
-                with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
-                    res = __salt__["event.fire"](
-                        {"name": name, "func": "delete", "persist": persist},
-                        "manage_schedule",
-                    )
-                    if res:
-                        event_ret = event_bus.get_event(
-                            tag="/salt/minion/minion_schedule_delete_complete", wait=30
+                ret["comment"].append("Deleted job: {} from schedule.".format(name))
+                ret["changes"][name] = "removed"
+
+            else:
+                persist = kwargs.get("persist", True)
+                try:
+                    with salt.utils.event.get_event(
+                        "minion", opts=__opts__
+                    ) as event_bus:
+                        res = __salt__["event.fire"](
+                            {"name": name, "func": "delete", "persist": persist},
+                            "manage_schedule",
                         )
-                        if event_ret and event_ret["complete"]:
-                            _schedule_ret = event_ret["schedule"]
-                            if name not in _schedule_ret:
-                                ret["result"] = True
-                                ret["comment"].append(
-                                    "Deleted job: {} from schedule.".format(name)
-                                )
-                            else:
-                                ret["comment"].append(
-                                    "Failed to delete job {} from schedule.".format(
-                                        name
+                        if res:
+                            event_ret = event_bus.get_event(
+                                tag="/salt/minion/minion_schedule_delete_complete",
+                                wait=30,
+                            )
+                            if event_ret and event_ret["complete"]:
+                                _schedule_ret = event_ret["schedule"]
+                                if name not in _schedule_ret:
+                                    ret["result"] = True
+                                    ret["changes"][name] = "removed"
+                                    ret["comment"].append(
+                                        "Deleted job: {} from schedule.".format(name)
                                     )
-                                )
-                                ret["result"] = True
+                                else:
+                                    ret["comment"].append(
+                                        "Failed to delete job {} from schedule.".format(
+                                            name
+                                        )
+                                    )
+                                    ret["result"] = True
+
+                except KeyError:
+                    # Effectively a no-op, since we can't really return without an event system
+                    ret["comment"] = "Event module not available. Schedule add failed."
+                    ret["result"] = True
+
+    # wait until the end to write file in offline mode
+    if kwargs.get("offline"):
+        schedule_conf = _get_schedule_config_file()
+
+        try:
+            with salt.utils.files.fopen(schedule_conf, "wb+") as fp_:
+                fp_.write(
+                    salt.utils.stringutils.to_bytes(
+                        salt.utils.yaml.safe_dump({"schedule": current_schedule})
+                    )
+                )
+        except OSError:
+            log.error(
+                "Failed to persist the updated schedule",
+                exc_info_on_loglevel=logging.DEBUG,
+            )
 
-            except KeyError:
-                # Effectively a no-op, since we can't really return without an event system
-                ret["comment"] = "Event module not available. Schedule add failed."
-                ret["result"] = True
     return ret
@@ -245,6 +315,10 @@ def delete(name, **kwargs):
     .. code-block:: bash
 
         salt '*' schedule.delete job1
+
+        # Delete job on Salt minion when the Salt minion is not running
+        salt '*' schedule.delete job1
+
     """
 
     ret = {
@@ -260,45 +334,86 @@ def delete(name, **kwargs):
         ret["comment"] = "Job: {} would be deleted from schedule.".format(name)
         ret["result"] = True
     else:
-        persist = kwargs.get("persist", True)
+        if kwargs.get("offline"):
+            current_schedule = list_(
+                show_all=True,
+                where="opts",
+                return_yaml=False,
+                offline=kwargs.get("offline"),
+            )
 
-        if name in list_(show_all=True, where="opts", return_yaml=False):
-            event_data = {"name": name, "func": "delete", "persist": persist}
-        elif name in list_(show_all=True, where="pillar", return_yaml=False):
-            event_data = {
-                "name": name,
-                "where": "pillar",
-                "func": "delete",
-                "persist": False,
-            }
-        else:
-            ret["comment"] = "Job {} does not exist.".format(name)
-            return ret
+            del current_schedule[name]
 
-        try:
-            with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
-                res = __salt__["event.fire"](event_data, "manage_schedule")
-                if res:
-                    event_ret = event_bus.get_event(
-                        tag="/salt/minion/minion_schedule_delete_complete",
-                        wait=30,
+            schedule_conf = _get_schedule_config_file()
+
+            try:
+                with salt.utils.files.fopen(schedule_conf, "wb+") as fp_:
+                    fp_.write(
+                        salt.utils.stringutils.to_bytes(
+                            salt.utils.yaml.safe_dump({"schedule": current_schedule})
+                        )
                     )
-                    if event_ret and event_ret["complete"]:
-                        schedule = event_ret["schedule"]
-                        if name not in schedule:
-                            ret["result"] = True
-                            ret["comment"] = "Deleted Job {} from schedule.".format(
-                                name
-                            )
-                            ret["changes"][name] = "removed"
-                        else:
-                            ret[
-                                "comment"
-                            ] = "Failed to delete job {} from schedule.".format(name)
-                            return ret
-        except KeyError:
-            # Effectively a no-op, since we can't really return without an event system
-            ret["comment"] = "Event module not available. Schedule add failed."
+            except OSError:
+                log.error(
+                    "Failed to persist the updated schedule",
+                    exc_info_on_loglevel=logging.DEBUG,
+                )
+
+            ret["result"] = True
+            ret["comment"] = "Deleted Job {} from schedule.".format(name)
+            ret["changes"][name] = "removed"
+        else:
+            persist = kwargs.get("persist", True)
+
+            if name in list_(
+                show_all=True,
+                where="opts",
+                return_yaml=False,
+                offline=kwargs.get("offline"),
+            ):
+                event_data = {"name": name, "func": "delete", "persist": persist}
+            elif name in list_(
+                show_all=True,
+                where="pillar",
+                return_yaml=False,
+                offline=kwargs.get("offline"),
+            ):
+                event_data = {
+                    "name": name,
+                    "where": "pillar",
+                    "func": "delete",
+                    "persist": False,
+                }
+            else:
+                ret["comment"] = "Job {} does not exist.".format(name)
+                return ret
+
+            try:
+                with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
+                    res = __salt__["event.fire"](event_data, "manage_schedule")
+                    if res:
+                        event_ret = event_bus.get_event(
+                            tag="/salt/minion/minion_schedule_delete_complete",
+                            wait=30,
+                        )
+                        if event_ret and event_ret["complete"]:
+                            schedule = event_ret["schedule"]
+                            if name not in schedule:
+                                ret["result"] = True
+                                ret["comment"] = "Deleted Job {} from schedule.".format(
+                                    name
+                                )
+                                ret["changes"][name] = "removed"
+                            else:
+                                ret[
+                                    "comment"
+                                ] = "Failed to delete job {} from schedule.".format(
+                                    name
+                                )
+                                return ret
+            except KeyError:
+                # Effectively a no-op, since we can't really return without an event system
+                ret["comment"] = "Event module not available. Schedule add failed."
     return ret
@@ -438,6 +553,10 @@ def add(name, **kwargs):
         salt '*' schedule.add job1 function='test.ping' seconds=3600
         # If function have some arguments, use job_args
         salt '*' schedule.add job2 function='cmd.run' job_args="['date >> /tmp/date.log']" seconds=60
+
+        # Add job to Salt minion when the Salt minion is not running
+        salt '*' schedule.add job1 function='test.ping' seconds=3600 offline=True
+
     """
 
     ret = {
@@ -445,8 +564,11 @@ def add(name, **kwargs):
         "result": False,
         "changes": {},
     }
+    current_schedule = list_(
+        show_all=True, return_yaml=False, offline=kwargs.get("offline")
+    )
 
-    if name in list_(show_all=True, return_yaml=False):
+    if name in current_schedule:
         ret["comment"] = "Job {} already exists in schedule.".format(name)
         ret["result"] = False
         return ret
@@ -486,32 +608,56 @@ def add(name, **kwargs):
         ret["comment"] = "Job: {} would be added to schedule.".format(name)
         ret["result"] = True
     else:
-        try:
-            with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
-                res = __salt__["event.fire"](
-                    {
-                        "name": name,
-                        "schedule": schedule_data,
-                        "func": "add",
-                        "persist": persist,
-                    },
-                    "manage_schedule",
+        if kwargs.get("offline"):
+            current_schedule.update(schedule_data)
+
+            schedule_conf = _get_schedule_config_file()
+
+            try:
+                with salt.utils.files.fopen(schedule_conf, "wb+") as fp_:
+                    fp_.write(
+                        salt.utils.stringutils.to_bytes(
+                            salt.utils.yaml.safe_dump({"schedule": current_schedule})
+                        )
+                    )
+            except OSError:
+                log.error(
+                    "Failed to persist the updated schedule",
+                    exc_info_on_loglevel=logging.DEBUG,
                 )
-                if res:
-                    event_ret = event_bus.get_event(
-                        tag="/salt/minion/minion_schedule_add_complete",
-                        wait=30,
+
+            ret["result"] = True
+            ret["comment"] = "Added job: {} to schedule.".format(name)
+            ret["changes"][name] = "added"
+        else:
+            try:
+                with salt.utils.event.get_event("minion", opts=__opts__) as event_bus:
+                    res = __salt__["event.fire"](
+                        {
+                            "name": name,
+                            "schedule": schedule_data,
+                            "func": "add",
+                            "persist": persist,
+                        },
+                        "manage_schedule",
                     )
-                    if event_ret and event_ret["complete"]:
-                        schedule = event_ret["schedule"]
-                        if name in schedule:
-                            ret["result"] = True
-                            ret["comment"] = "Added job: {} to schedule.".format(name)
-                            ret["changes"][name] = "added"
-                            return ret
-        except KeyError:
-            # Effectively a no-op, since we can't really return without an event system
-            ret["comment"] = "Event module not available. Schedule add failed."
+                    if res:
+                        event_ret = event_bus.get_event(
+                            tag="/salt/minion/minion_schedule_add_complete",
+                            wait=30,
+                        )
+                        if event_ret and event_ret["complete"]:
+                            schedule = event_ret["schedule"]
+                            if name in schedule:
+                                ret["result"] = True
+                                ret["comment"] = "Added job: {} to schedule.".format(
+                                    name
+                                )
+                                ret["changes"][name] = "added"
+                                return ret
+            except KeyError:
+                # Effectively a no-op, since we can't really return without an event system
+                ret["comment"] = "Event module not available. Schedule add failed."
 
     return ret
@@ -524,6 +670,10 @@ def modify(name, **kwargs):
     .. code-block:: bash
 
         salt '*' schedule.modify job1 function='test.ping' seconds=3600
+
+        # Modify job on Salt minion when the Salt minion is not running
+        salt '*' schedule.modify job1 function='test.ping' seconds=3600 offline=True
+
     """
 
     ret = {"comment": "", "changes": {}, "result": True}
@@ -549,7 +699,9 @@ def modify(name, **kwargs):
         ret["comment"] = 'Unable to use "when" and "cron" options together. Ignoring.'
         return ret
 
-    current_schedule = list_(show_all=True, return_yaml=False)
+    current_schedule = list_(
+        show_all=True, return_yaml=False, offline=kwargs.get("offline")
+    )
 
     if name not in current_schedule:
         ret["comment"] = "Job {} does not exist in schedule.".format(name)
@@ -566,8 +718,7 @@ def modify(name, **kwargs):
         _current["seconds"] = _current.pop("_seconds")
 
     # Copy _current _new, then update values from kwargs
-    _new = pycopy.deepcopy(_current)
-    _new.update(kwargs)
+    _new = build_schedule_item(name, **kwargs)
 
     # Remove test from kwargs, it's not a valid schedule option
    _new.pop("test", None)
@@ -587,29 +738,51 @@ def modify(name, **kwargs):
 
     if "test" in kwargs and kwargs["test"]:
         ret["comment"] = "Job: {} would be modified in schedule.".format(name)
     else:
-        persist = kwargs.get("persist", True)
-        if name in list_(show_all=True, where="opts", return_yaml=False):
-            event_data = {
-                "name": name,
-                "schedule": _new,
-                "func": "modify",
-                "persist": persist,
-            }
-        elif name in list_(show_all=True, where="pillar", return_yaml=False):
-            event_data = {
-                "name": name,
-                "schedule": _new,
-                "where": "pillar",
-                "func": "modify",
-                "persist": False,
-            }
+        if kwargs.get("offline"):
+            current_schedule[name].update(_new)
 
-        out = __salt__["event.fire"](event_data, "manage_schedule")
-        if out:
+            schedule_conf = _get_schedule_config_file()
+
+            try:
+                with salt.utils.files.fopen(schedule_conf, "wb+") as fp_:
+                    fp_.write(
+                        salt.utils.stringutils.to_bytes(
+                            salt.utils.yaml.safe_dump({"schedule": current_schedule})
+                        )
+                    )
+            except OSError:
+                log.error(
+                    "Failed to persist the updated schedule",
+                    exc_info_on_loglevel=logging.DEBUG,
+                )
+
+            ret["result"] = True
             ret["comment"] = "Modified job: {} in schedule.".format(name)
+
         else:
-            ret["comment"] = "Failed to modify job {} in schedule.".format(name)
-            ret["result"] = False
+            persist = kwargs.get("persist", True)
+            if name in list_(show_all=True, where="opts", return_yaml=False):
+                event_data = {
+                    "name": name,
+                    "schedule": _new,
+                    "func": "modify",
+                    "persist": persist,
+                }
+            elif name in list_(show_all=True, where="pillar", return_yaml=False):
+                event_data = {
+                    "name": name,
+                    "schedule": _new,
+                    "where": "pillar",
+                    "func": "modify",
+                    "persist": False,
+                }
+
+            out = __salt__["event.fire"](event_data, "manage_schedule")
+            if out:
+                ret["comment"] = "Modified job: {} in schedule.".format(name)
+            else:
+                ret["comment"] = "Failed to modify job {} in schedule.".format(name)
+                ret["result"] = False
 
     return ret
diff --git a/tests/pytests/unit/modules/test_schedule.py b/tests/pytests/unit/modules/test_schedule.py
index e6cb134982..02914be82f 100644
--- a/tests/pytests/unit/modules/test_schedule.py
+++ b/tests/pytests/unit/modules/test_schedule.py
@@ -8,7 +8,8 @@ import pytest
 import salt.modules.schedule as schedule
 import salt.utils.odict
 from salt.utils.event import SaltEvent
-from tests.support.mock import MagicMock, patch
+from salt.utils.odict import OrderedDict
+from tests.support.mock import MagicMock, call, mock_open, patch
 
 log = logging.getLogger(__name__)
 
@@ -29,6 +30,11 @@ def sock_dir(tmp_path):
     return str(tmp_path / "test-socks")
 
 
+@pytest.fixture
+def schedule_config_file(tmp_path):
+    return "/etc/salt/minion.d/_schedule.conf"
+
+
 @pytest.fixture
 def configure_loader_modules():
     return {schedule: {}}
@@ -36,24 +42,56 @@ def configure_loader_modules():
 
 # 'purge' function tests: 1
 @pytest.mark.slow_test
-def test_purge(sock_dir):
+def test_purge(sock_dir, job1, schedule_config_file):
     """
     Test if it purge all the jobs currently scheduled on the minion.
     """
+    _schedule_data = {"job1": job1}
     with patch.dict(schedule.__opts__, {"schedule": {}, "sock_dir": sock_dir}):
         mock = MagicMock(return_value=True)
         with patch.dict(schedule.__salt__, {"event.fire": mock}):
             _ret_value = {"complete": True, "schedule": {}}
             with patch.object(SaltEvent, "get_event", return_value=_ret_value):
-                assert schedule.purge() == {
-                    "comment": ["Deleted job: schedule from schedule."],
+                with patch.object(
+                    schedule, "list_", MagicMock(return_value=_schedule_data)
+                ):
+                    assert schedule.purge() == {
+                        "comment": ["Deleted job: job1 from schedule."],
+                        "changes": {"job1": "removed"},
+                        "result": True,
+                    }
+
+    _schedule_data = {"job1": job1, "job2": job1, "job3": job1}
+    comm = [
+        "Deleted job: job1 from schedule.",
+        "Deleted job: job2 from schedule.",
+        "Deleted job: job3 from schedule.",
+    ]
+
+    changes = {"job1": "removed", "job2": "removed", "job3": "removed"}
+
+    with patch.dict(
+        schedule.__opts__, {"schedule": {"job1": "salt"}, "sock_dir": sock_dir}
+    ):
+        with patch("salt.utils.files.fopen", mock_open(read_data="")) as fopen_mock:
+            with patch.object(
+                schedule, "list_", MagicMock(return_value=_schedule_data)
+            ):
+                assert schedule.purge(offline=True) == {
+                    "comment": comm,
+                    "changes": changes,
                     "result": True,
                 }
+                _call = call(b"schedule: {}\n")
+                write_calls = fopen_mock.filehandles[schedule_config_file][
+                    0
+                ].write._mock_mock_calls
+                assert _call in write_calls
 
 
 # 'delete' function tests: 1
 @pytest.mark.slow_test
-def test_delete(sock_dir):
+def test_delete(sock_dir, job1, schedule_config_file):
     """
     Test if it delete a job from the minion's schedule.
     """
@@ -68,6 +106,28 @@ def test_delete(sock_dir):
                     "result": False,
                 }
 
+    _schedule_data = {"job1": job1}
+    comm = "Deleted Job job1 from schedule."
+    changes = {"job1": "removed"}
+    with patch.dict(
+        schedule.__opts__, {"schedule": {"job1": "salt"}, "sock_dir": sock_dir}
+    ):
+        with patch("salt.utils.files.fopen", mock_open(read_data="")) as fopen_mock:
+            with patch.object(
+                schedule, "list_", MagicMock(return_value=_schedule_data)
+            ):
+                assert schedule.delete("job1", offline="True") == {
+                    "comment": comm,
+                    "changes": changes,
+                    "result": True,
+                }
+
+                _call = call(b"schedule: {}\n")
+                write_calls = fopen_mock.filehandles[schedule_config_file][
+                    0
+                ].write._mock_mock_calls
+                assert _call in write_calls
+
 
 # 'build_schedule_item' function tests: 1
 def test_build_schedule_item(sock_dir):
@@ -120,7 +180,7 @@ def test_build_schedule_item_invalid_when(sock_dir):
 
 
 @pytest.mark.slow_test
-def test_add(sock_dir):
+def test_add(sock_dir, schedule_config_file):
     """
     Test if it add a job to the schedule.
     """
@@ -163,6 +223,24 @@ def test_add(sock_dir):
                     "result": True,
                 }
 
+    comm1 = "Added job: job3 to schedule."
+    changes1 = {"job3": "added"}
+    with patch.dict(
+        schedule.__opts__, {"schedule": {"job1": "salt"}, "sock_dir": sock_dir}
+    ):
+        with patch("salt.utils.files.fopen", mock_open(read_data="")) as fopen_mock:
+            assert schedule.add(
+                "job3", function="test.ping", seconds=3600, offline="True"
+            ) == {"comment": comm1, "changes": changes1, "result": True}
+
+            _call = call(
+                b"schedule:\n  job3: {function: test.ping, seconds: 3600, maxrunning: 1, name: job3, enabled: true,\n    jid_include: true}\n"
+            )
+            write_calls = fopen_mock.filehandles[schedule_config_file][
+                1
+            ].write._mock_mock_calls
+            assert _call in write_calls
+
 
 # 'run_job' function tests: 1
@@ -444,7 +522,7 @@ def test_copy(sock_dir, job1):
 
 
 @pytest.mark.slow_test
-def test_modify(sock_dir):
+def test_modify(sock_dir, job1, schedule_config_file):
     """
     Test if modifying job to the schedule.
     """
@@ -564,7 +642,6 @@ def test_modify(sock_dir):
     for key in [
         "maxrunning",
         "function",
-        "seconds",
         "jid_include",
         "name",
         "enabled",
@@ -586,6 +663,51 @@ def test_modify(sock_dir):
         ret = schedule.modify("job2", function="test.version", test=True)
         assert ret == expected5
 
+    _schedule_data = {"job1": job1}
+    comm = "Modified job: job1 in schedule."
+    changes = {"job1": "removed"}
+
+    changes = {
+        "job1": {
+            "new": OrderedDict(
+                [
+                    ("function", "test.version"),
+                    ("maxrunning", 1),
+                    ("name", "job1"),
+                    ("enabled", True),
+                    ("jid_include", True),
+                ]
+            ),
+            "old": OrderedDict(
+                [
+                    ("function", "test.ping"),
+                    ("maxrunning", 1),
+                    ("name", "job1"),
+                    ("jid_include", True),
+                    ("enabled", True),
+                ]
+            ),
+        }
+    }
+    with patch.dict(
+        schedule.__opts__, {"schedule": {"job1": "salt"}, "sock_dir": sock_dir}
+    ):
+        with patch("salt.utils.files.fopen", mock_open(read_data="")) as fopen_mock:
+            with patch.object(
+                schedule, "list_", MagicMock(return_value=_schedule_data)
+            ):
+                assert schedule.modify(
+                    "job1", function="test.version", offline="True"
+                ) == {"comment": comm, "changes": changes, "result": True}
+
+                _call = call(
+                    b"schedule:\n  job1: {enabled: true, function: test.version, jid_include: true, maxrunning: 1,\n    name: job1}\n"
+                )
+                write_calls = fopen_mock.filehandles[schedule_config_file][
+                    0
+                ].write._mock_mock_calls
+                assert _call in write_calls
+
 
 # 'is_enabled' function tests: 1
-- 
2.37.2
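Usage note (not part of the patch): the changelog entry above adds offline management of scheduler jobs, and the sketch below shows one way the new offline flag might be exercised. It assumes the patched schedule module is invoked locally with salt-call on a host whose salt-minion service is stopped; the job name job1 and the test.ping function are illustrative placeholders.

    # Manage scheduler jobs while the minion daemon is not running (sketch).
    # With offline=True the module reads and writes minion.d/_schedule.conf
    # directly instead of firing a manage_schedule event on the event bus.
    salt-call --local schedule.add job1 function='test.ping' seconds=3600 offline=True
    salt-call --local schedule.list offline=True
    salt-call --local schedule.modify job1 function='test.ping' seconds=7200 offline=True
    salt-call --local schedule.delete job1 offline=True

When the minion is started again, the persisted _schedule.conf under the minion.d include directory is read through the normal default_include mechanism, so the jobs edited offline take effect without further action.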