diff --git a/zaza/openstack/charm_tests/mysql/tests.py b/zaza/openstack/charm_tests/mysql/tests.py
index 5cbc42d..7603392 100644
--- a/zaza/openstack/charm_tests/mysql/tests.py
+++ b/zaza/openstack/charm_tests/mysql/tests.py
@@ -410,16 +410,15 @@ class MySQLInnoDBClusterTests(MySQLCommonTests):
 
         Run the cluster-status action.
         """
-        # Update which node is the leader and which are not
-        _leaders, _non_leaders = self.get_leaders_and_non_leaders()
         logging.info("Execute cluster-status action")
-        action = zaza.model.run_action(
-            _non_leaders[0],
+        action = zaza.model.run_action_on_leader(
+            self.application,
             "cluster-status",
             action_params={})
         cluster_status = json.loads(action.data["results"]["cluster-status"])
         assert "OK" in cluster_status["defaultReplicaSet"]["status"], (
-            "Cluster status action failed.")
+            "Cluster status action failed: {}"
+            .format(action.data))
         logging.info("Passed cluster-status action test.")
 
     def test_110_mysqldump(self):
@@ -429,14 +428,125 @@ class MySQLInnoDBClusterTests(MySQLCommonTests):
         """
         _db = "keystone"
         _file_key = "mysqldump-file"
-        # Update which node is the leader and which are not
-        _leaders, _non_leaders = self.get_leaders_and_non_leaders()
         logging.info("Execute mysqldump action")
-        action = zaza.model.run_action(
-            _non_leaders[0],
+        action = zaza.model.run_action_on_leader(
+            self.application,
             "mysqldump",
             action_params={"databases": _db})
         _results = action.data["results"]
         assert _db in _results[_file_key], (
-            "Mysqldump action failed.")
+            "Mysqldump action failed: {}".format(action.data))
         logging.info("Passed mysqldump action test.")
+
+    def test_120_set_cluster_option(self):
+        """Set cluster option.
+
+        Run the set-cluster-option action.
+        """
+        _key = "autoRejoinTries"
+        _value = "500"
+        logging.info("Set cluster option {}={}".format(_key, _value))
+        action = zaza.model.run_action_on_leader(
+            self.application,
+            "set-cluster-option",
+            action_params={"key": _key, "value": _value})
+        assert "Success" in action.data["results"]["outcome"], (
+            "Set cluster option {}={} action failed: {}"
+            .format(_key, _value, action.data))
+        logging.info("Passed set cluster option action test.")
+
+
+class MySQLInnoDBClusterColdStartTest(MySQLBaseTest):
+    """MySQL InnoDB Cluster cold start tests."""
+
+    @classmethod
+    def setUpClass(cls):
+        """Run class setup for mysql-innodb-cluster cold start tests."""
+        super().setUpClass()
+        cls.application = "mysql-innodb-cluster"
+        cls.overcloud_keystone_session = (
+            openstack_utils.get_undercloud_keystone_session())
+        cls.nova_client = openstack_utils.get_nova_session_client(
+            cls.overcloud_keystone_session)
+
+    def resolve_update_status_errors(self):
+        """Resolve update-status hook errors.
+
+        This should *only* be used after an instance hard reboot to handle the
+        situation where an update-status hook was running when the unit was
+        rebooted.
+        """
+        zaza.model.resolve_units(
+            application_name=self.application,
+            erred_hook='update-status',
+            wait=True)
+
+    def test_100_reboot_cluster_from_complete_outage(self):
+        """Reboot cluster from complete outage.
+
+        After a cold start, run reboot-cluster-from-complete-outage.
+        """
+        _machines = list(
+            juju_utils.get_machine_uuids_for_application(self.application))
+        # Stop Nodes
+        _machines.sort()
+        # Avoid hitting an update-status hook
+        logging.debug("Wait till model is idle ...")
+        zaza.model.block_until_all_units_idle()
+        logging.info("Stopping instances: {}".format(_machines))
+        for uuid in _machines:
+            self.nova_client.servers.stop(uuid)
+        logging.debug("Wait till all machines are shutoff ...")
+        for uuid in _machines:
+            openstack_utils.resource_reaches_status(self.nova_client.servers,
+                                                    uuid,
+                                                    expected_status='SHUTOFF',
+                                                    stop_after_attempt=16)
+
+        # Start nodes
+        _machines.sort(reverse=True)
+        logging.info("Starting instances: {}".format(_machines))
+        for uuid in _machines:
+            self.nova_client.servers.start(uuid)
+
+        for unit in zaza.model.get_units(self.application):
+            zaza.model.block_until_unit_wl_status(
+                unit.entity_id,
+                'unknown',
+                negate_match=True)
+
+        logging.debug("Wait till model is idle ...")
+        try:
+            zaza.model.block_until_all_units_idle()
+        except zaza.model.UnitError:
+            self.resolve_update_status_errors()
+            zaza.model.block_until_all_units_idle()
+
+        logging.debug("Wait for application states ...")
+        for unit in zaza.model.get_units(self.application):
+            try:
+                zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
+            except zaza.model.UnitError:
+                self.resolve_update_status_errors()
+                zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
+        states = {self.application: {
+            "workload-status": "blocked",
+            "workload-status-message":
+                "MySQL InnoDB Cluster not healthy: None"}}
+        zaza.model.wait_for_application_states(states=states)
+
+        logging.info("Execute reboot-cluster-from-complete-outage "
+                     "action after cold boot ...")
+        action = zaza.model.run_action_on_leader(
+            self.application,
+            "reboot-cluster-from-complete-outage",
+            action_params={})
+        assert "Success" in action.data["results"]["outcome"], (
+            "Reboot cluster from complete outage action failed: {}"
+            .format(action.data))
+        logging.debug("Wait for application states ...")
+        for unit in zaza.model.get_units(self.application):
+            zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
+        test_config = lifecycle_utils.get_charm_config(fatal=False)
+        zaza.model.wait_for_application_states(
+            states=test_config.get("target_deploy_status", {}))