Merge pull request #306 from gnuoy/fix-hacluster-tests
Fix hacluster tests
@@ -20,7 +20,6 @@ import logging
 import os
 
 import zaza.openstack.charm_tests.test_utils as test_utils
-import zaza.openstack.utilities.juju as juju_utils
 import zaza.openstack.configure.hacluster
 
 
@@ -35,78 +34,15 @@ class HaclusterTest(test_utils.OpenStackBaseTest):
 
     def test_900_action_cleanup(self):
         """The services can be cleaned up."""
-        status = zaza.model.get_status().applications[self.application_name]
-
-        # libjuju juju status no longer has units for subordinate charms
-        # Use the application it is subordinate-to to check workload status
-        if status.get("units") is None and status.get("subordinate-to"):
-            primary_status = juju_utils.get_application_status(
-                status.get("subordinate-to")[0])
-            leader = None
-            for unit in primary_status["units"]:
-                if primary_status["units"][unit].get('leader'):
-                    leader = unit
-
-            if primary_status["units"][leader].get("subordinates"):
-                for subordinate in primary_status["units"][leader]["subordinates"]:
-                    # mysql-router is a subordinate from focal onwards
-                    _app = subordinate.split('/')[0]
-                    if _app != 'hacluster':
-                        continue
-                    logging.info("Cleaning {}".format(subordinate))
-                    _action = "cleanup"
-                    action_id = zaza.model.run_action(subordinate, "cleanup")
-                    assert "success" in action_id.data["results"]["result"], (
-                        "Set hacluster action {} failed: {}"
-                        .format(_action, action_id.data))
-
-                    logging.info("Cleaning action w/resource {}"
-                                 .format(subordinate))
-                    params = {'resource': 'res_ks_haproxy'}
-                    _action = "cleanup res_ks_haproxy"
-                    zaza.model.run_action(subordinate, "cleanup",
-                                          action_params=params)
-                    assert "success" in action_id.data["results"]["result"], (
-                        "Set hacluster action {} failed: {}"
-                        .format(_action, action_id.data))
+        zaza.model.run_action_on_leader(
+            self.application_name,
+            'cleanup',
+            raise_on_failure=True)
 
     def test_910_pause_and_resume(self):
         """The services can be paused and resumed."""
-        logging.debug('Checking pause and resume actions...')
-
-        status = zaza.model.get_status().applications[self.application_name]
-
-        # libjuju juju status no longer has units for subordinate charms
-        # Use the application it is subordinate-to to check workload status
-        if status.get("units") is None and status.get("subordinate-to"):
-            primary_status = juju_utils.get_application_status(
-                status.get("subordinate-to")[0])
-            leader = None
-            for unit in primary_status["units"]:
-                if primary_status["units"][unit].get('leader'):
-                    leader = unit
-
-            if primary_status["units"][leader].get("subordinates"):
-                for subordinate in primary_status["units"][leader]["subordinates"]:
-                    # mysql-router is a subordinate from focal onwards
-                    _app = subordinate.split('/')[0]
-                    if _app != 'hacluster':
-                        continue
-                    logging.info("Pausing {}".format(subordinate))
-                    zaza.model.run_action(subordinate, "pause")
-                    zaza.model.block_until_unit_wl_status(
-                        subordinate,
-                        "maintenance")
-
-                    logging.info("Resuming {}".format(subordinate))
-                    zaza.model.run_action(subordinate, "resume")
-                    zaza.model.block_until_unit_wl_status(subordinate, "active")
-
-        _states = {"hacluster": {
-            "workload-status": "active",
-            "workload-status-message": "Unit is ready and clustered"}}
-        zaza.model.wait_for_application_states(states=_states)
-        logging.debug('OK')
+        with self.pause_resume([]):
+            logging.info("Testing pause resume")
 
     def _toggle_maintenance_and_wait(self, expected):
         """Configure cluster maintenance-mode.
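For reference, the simplified tests lean entirely on two zaza helpers that appear in the added lines above: zaza.model.run_action_on_leader, which resolves the leader unit itself (replacing the removed manual walk over the primary application's subordinates), and the pause_resume context manager provided by test_utils.OpenStackBaseTest. A minimal sketch of the pattern, assuming a test class wired up the same way as HaclusterTest (the class and test names below are hypothetical):

import logging

import zaza.model
import zaza.openstack.charm_tests.test_utils as test_utils


class MinimalSubordinateTest(test_utils.OpenStackBaseTest):
    """Sketch: the post-fix pattern for exercising a subordinate charm."""

    def test_cleanup(self):
        # run_action_on_leader locates the leader unit for us; with
        # raise_on_failure=True a failed action raises an exception
        # instead of requiring a manual assert on the action result.
        zaza.model.run_action_on_leader(
            self.application_name,
            'cleanup',
            raise_on_failure=True)

    def test_pause_and_resume(self):
        # pause_resume pauses the unit's services, yields to the block
        # body, then resumes and waits for the workload to settle; an
        # empty service list skips per-service checks.
        with self.pause_resume([]):
            logging.info("Services are paused inside this block")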