Add cleanup to the unit removal case

For some reason, test cases are sometimes executed out of order, so
test_921_remove_unit sometimes runs before the pause_and_resume test
case, which results in an error.

While the root cause still needs to be found, it would also be good to
avoid side effects in individual test cases and to return the
environment to its original state.

There is no 'start' hook implementation in charm-rabbitmq-server, and
adding one this close to the 20.05 release is discouraged, so this
change instead simulates an upgrade-charm event to re-trigger the
addition of the previously removed unit to the cluster.

NOTE: after hooks/upgrade-charm finishes, the charm stays in the
'waiting' state with the following status until the next update-status
event: 'Unit has peers, but RabbitMQ not clustered'
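
To make the mechanism concrete, here is a minimal sketch of the re-add
sequence, using the same zaza helpers the test below relies on (the
unit name is purely illustrative):

    import zaza.model

    # Hypothetical name of the unit that was previously removed.
    unit = 'rabbitmq-server/2'

    # Re-running the upgrade-charm hook re-triggers the logic that adds
    # the unit back to the cluster.
    zaza.model.run_on_unit(unit, 'hooks/upgrade-charm')

    # The unit sits in 'waiting' ('Unit has peers, but RabbitMQ not
    # clustered') until the next update-status event, after which it
    # should report 'active' again.
    zaza.model.block_until_unit_wl_status(unit, 'active')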

Related-Bug: #1730709
Dmitrii Shcherbakov
2020-05-18 19:58:16 +03:00
parent ac761f026a
commit dfc6235362


@@ -343,11 +343,14 @@ class RmqTests(test_utils.OpenStackBaseTest):
     def _retry_check_unit_cluster_nodes(self, u, unit_node_names):
         return rmq_utils.check_unit_cluster_nodes(u, unit_node_names)
 
-    def test_921_remove_unit(self):
+    def test_921_remove_and_add_unit(self):
         """Test if unit cleans up when removed from Rmq cluster.
 
         Test if a unit correctly cleans up by removing itself from the
-        RabbitMQ cluster on removal
+        RabbitMQ cluster on removal.
+
+        Add the unit back to the cluster at the end of the test case to
+        avoid side-effects.
         """
 
         logging.info('Checking that units correctly clean up after '
@@ -356,25 +359,48 @@ class RmqTests(test_utils.OpenStackBaseTest):
         zaza.model.set_application_config('rabbitmq-server', config)
         rmq_utils.wait_for_cluster()
 
-        units = zaza.model.get_units(self.application_name)
-        removed_unit = units[-1]
-        left_units = units[:-1]
+        all_units = zaza.model.get_units(self.application_name)
+        removed_unit = all_units[-1]
+        left_units = all_units[:-1]
 
         logging.info('Simulating unit {} removal'.format(removed_unit))
         zaza.model.run_on_unit(removed_unit.entity_id, 'hooks/stop')
+        logging.info('Waiting until unit {} reaches "waiting" state'
+                     ''.format(removed_unit))
+        zaza.model.block_until_unit_wl_status(removed_unit.entity_id,
+                                              "waiting")
 
-        unit_host_names = generic_utils.get_unit_hostnames(left_units)
-        unit_node_names = []
-        for unit in unit_host_names:
-            unit_node_names.append('rabbit@{}'.format(unit_host_names[unit]))
-        errors = []
+        def check_units(units):
+            unit_host_names = generic_utils.get_unit_hostnames(units)
+            unit_node_names = []
+            for unit in unit_host_names:
+                unit_node_names.append('rabbit@{}'.format(
+                    unit_host_names[unit]))
+            errors = []
-        for u in left_units:
-            e = self._retry_check_unit_cluster_nodes(u,
-                                                     unit_node_names)
-            if e:
-                errors.append(e)
+            for u in units:
+                e = self._retry_check_unit_cluster_nodes(u,
+                                                         unit_node_names)
+                if e:
+                    errors.append(e)
+            self.assertFalse(errors, msg=errors)
+
+        logging.info('Checking that all units except for {} are present '
+                     'in the cluster'.format(removed_unit))
+        check_units(left_units)
+
+        logging.info('Re-adding the removed unit {} back to the cluster '
+                     'by simulating the upgrade-charm event'
+                     ''.format(removed_unit))
+        # TODO(dmitriis): Fix the rabbitmq charm to add a proper way to add a
+        # unit back to the cluster and replace this.
+        zaza.model.run_on_unit(removed_unit.entity_id, 'hooks/upgrade-charm')
+        logging.info('Waiting until unit {} reaches "active" state'
+                     ''.format(removed_unit))
+        zaza.model.block_until_unit_wl_status(removed_unit.entity_id,
+                                              "active")
+
+        logging.info('Checking that all units are present in the cluster')
+        check_units(all_units)
-        self.assertFalse(errors, msg=errors)
         logging.info('OK')