Merge pull request #141 from thedac/series-upgrade-daemons

Juju daemons remain up after pre-series-upgrade
Liam Young
2018-10-09 18:15:57 +01:00
committed by GitHub
3 changed files with 28 additions and 7 deletions
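
For orientation (this note and the sketch are not part of the diff): the change threads a caller-owned completed_machines list into series_upgrade_application, so a machine that was already series-upgraded while handling one application is skipped when a later application shares it. A minimal sketch of the intended call pattern, mirroring the functional test below; the import path and the literal values are assumptions, not code from this PR:

# Sketch only: module path and values are assumed. The shared list is the
# point: it is created once by the caller and passed to every application.
from zaza import model
from zaza.utilities import generic as generic_utils  # assumed import path

completed_machines = []
for application in model.get_status().applications:
    generic_utils.series_upgrade_application(
        application,
        from_series="trusty",
        to_series="xenial",
        origin="openstack-origin",
        completed_machines=completed_machines,  # new parameter in this PR
        workaround_script=None,
        files=None)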

View File

@@ -259,6 +259,7 @@ class TestGenericUtils(ut_utils.BaseTestCase):
_origin = "source"
_files = ["filename", "scriptname"]
_workaround_script = "scriptname"
_completed_machines = []
# Peers and Subordinates
_run_action_calls = [
mock.call("{}-hacluster/1".format(_application),
@@ -283,6 +284,7 @@ class TestGenericUtils(ut_utils.BaseTestCase):
to_series=_to_series, from_series=_from_series,
pause_non_leader_primary=True,
pause_non_leader_subordinate=True,
completed_machines=_completed_machines,
workaround_script=_workaround_script, files=_files),
self.run_action.assert_has_calls(_run_action_calls)
self.series_upgrade.assert_has_calls(_series_upgrade_calls)
@@ -296,6 +298,7 @@ class TestGenericUtils(ut_utils.BaseTestCase):
_origin = "source"
_files = ["filename", "scriptname"]
_workaround_script = "scriptname"
_completed_machines = []
# Subordinates only
_run_action_calls = [
mock.call("{}-hacluster/1".format(_application),
@@ -319,6 +322,7 @@ class TestGenericUtils(ut_utils.BaseTestCase):
to_series=_to_series, from_series=_from_series,
pause_non_leader_primary=False,
pause_non_leader_subordinate=True,
completed_machines=_completed_machines,
workaround_script=_workaround_script, files=_files),
self.run_action.assert_has_calls(_run_action_calls)
self.series_upgrade.assert_has_calls(_series_upgrade_calls)
@@ -333,6 +337,7 @@ class TestGenericUtils(ut_utils.BaseTestCase):
_series_upgrade_calls = []
_files = ["filename", "scriptname"]
_workaround_script = "scriptname"
_completed_machines = []
for machine_num in ("0", "1", "2"):
_series_upgrade_calls.append(
@@ -348,6 +353,7 @@ class TestGenericUtils(ut_utils.BaseTestCase):
to_series=_to_series, from_series=_from_series,
pause_non_leader_primary=False,
pause_non_leader_subordinate=False,
completed_machines=_completed_machines,
workaround_script=_workaround_script, files=_files)
self.run_action.assert_not_called()
self.series_upgrade.assert_has_calls(_series_upgrade_calls)

View File

@@ -46,6 +46,7 @@ class SeriesUpgradeTest(unittest.TestCase):
os.environ["JUJU_DEV_FEATURE_FLAGS"] = "upgrade-series"
applications = model.get_status().applications
completed_machines = []
for application in applications:
# Defaults
origin = "openstack-origin"
@@ -78,6 +79,7 @@ class SeriesUpgradeTest(unittest.TestCase):
from_series=self.from_series,
to_series=self.to_series,
origin=origin,
completed_machines=completed_machines,
workaround_script=self.workaround_script,
files=self.files)
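
A note on the functional test hunks above: completed_machines is created once, before the loop over applications, which is what lets a machine hosting units of several applications be upgraded only once per run. A self-contained toy (plain Python, hypothetical names, no Juju required) showing why the caller owns the list:

def toy_upgrade_application(unit_to_machine, completed_machines):
    """Hypothetical stand-in for series_upgrade_application."""
    for unit, machine in unit_to_machine.items():
        if machine in completed_machines:
            print("skipping {}: machine {} already upgraded".format(unit, machine))
            continue
        print("upgrading {} on machine {}".format(unit, machine))
        completed_machines.append(machine)

done = []
toy_upgrade_application({"keystone/0": "1"}, done)
toy_upgrade_application({"keystone-hacluster/0": "1"}, done)  # skipped, machine "1" is done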

View File

@@ -172,6 +172,7 @@ def series_upgrade_application(application, pause_non_leader_primary=True,
pause_non_leader_subordinate=True,
from_series="trusty", to_series="xenial",
origin='openstack-origin',
completed_machines=[],
files=None, workaround_script=None):
"""Series upgrade application.
@@ -194,6 +195,9 @@ def series_upgrade_application(application, pause_non_leader_primary=True,
:param origin: The configuration setting variable name for changing origin
source. (openstack-origin or source)
:type origin: str
:param completed_machines: List of completed machines which no longer
require series upgrade.
:type completed_machines: list
:param files: Workaround files to scp to unit under upgrade
:type files: list
:param workaround_script: Workaround script to run during series upgrade
@@ -202,7 +206,6 @@ def series_upgrade_application(application, pause_non_leader_primary=True,
:rtype: None
"""
status = model.get_status().applications[application]
completed_machines = []
# For some applications (percona-cluster) the leader unit must upgrade
# first. For API applications the non-leader haclusters must be paused
@@ -237,8 +240,12 @@ def series_upgrade_application(application, pause_non_leader_primary=True,
files=files)
completed_machines.append(machine)
else:
logging.info("Skipping unit: {}. Machine: {} already upgraded"
.format(unit, machine))
logging.info("Skipping unit: {}. Machine: {} already upgraded."
"But setting origin on the application {}"
.format(unit, machine, application))
logging.info("Set origin on {}".format(application))
set_origin(application, origin)
model.block_until_all_units_idle()
# Series upgrade the non-leaders
for unit in non_leaders:
@@ -252,8 +259,12 @@ def series_upgrade_application(application, pause_non_leader_primary=True,
files=files)
completed_machines.append(machine)
else:
logging.info("Skipping unit: {}. Machine: {} already upgraded"
.format(unit, machine))
logging.info("Skipping unit: {}. Machine: {} already upgraded. "
"But setting origin on the application {}"
.format(unit, machine, application))
logging.info("Set origin on {}".format(application))
set_origin(application, origin)
model.block_until_all_units_idle()
def series_upgrade(unit_name, machine_num,
@@ -285,9 +296,11 @@ def series_upgrade(unit_name, machine_num,
set_dpkg_non_interactive_on_unit(unit_name)
logging.info("Prepare series upgrade on {}".format(machine_num))
model.prepare_series_upgrade(machine_num, to_series=to_series)
logging.info("Watiing for workload status 'unknown' on {}"
logging.info("Watiing for workload status 'blocked' on {}"
.format(unit_name))
model.block_until_unit_wl_status(unit_name, "unknown")
model.block_until_unit_wl_status(unit_name, "blocked")
logging.info("Watiing for model idleness")
model.block_until_all_units_idle()
wrap_do_release_upgrade(unit_name, from_series=from_series,
to_series=to_series, files=files,
workaround_script=workaround_script)
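
Condensed view of the behaviour changes in this last file (helper names as they appear in the hunks above; the guard around the skip branch is implied by the context rather than shown in the diff): a unit whose machine is already in completed_machines is not upgraded again, but the application's origin is still set so its configuration stays consistent. The wait after prepare_series_upgrade now targets the 'blocked' workload status instead of 'unknown', matching the PR title that the Juju daemons remain up after pre-series-upgrade. The per-machine sequence inside series_upgrade, sketched rather than quoted verbatim:

# Sketch of the series_upgrade flow after this change (logging omitted).
set_dpkg_non_interactive_on_unit(unit_name)
model.prepare_series_upgrade(machine_num, to_series=to_series)
model.block_until_unit_wl_status(unit_name, "blocked")  # was "unknown"
model.block_until_all_units_idle()                      # wait added by this PR
wrap_do_release_upgrade(unit_name, from_series=from_series,
                        to_series=to_series, files=files,
                        workaround_script=workaround_script)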