Merge branch 'master' into hacluster-scaleback

This commit is contained in:
Aurelien Lourot
2021-02-08 15:20:53 +01:00
38 changed files with 1754 additions and 452 deletions

16
tox.ini
View File

@@ -1,6 +1,22 @@
[tox]
envlist = pep8, py3
skipsdist = True
# NOTE: Avoid build/test env pollution by not enabling sitepackages.
sitepackages = False
# NOTE: Avoid false positives by not skipping missing interpreters.
skip_missing_interpreters = False
# NOTES:
# * We avoid the new dependency resolver by pinning pip < 20.3, see
# https://github.com/pypa/pip/issues/9187
# * Pinning dependencies requires tox >= 3.2.0, see
# https://tox.readthedocs.io/en/latest/config.html#conf-requires
# * It is also necessary to pin virtualenv as a newer virtualenv would still
# lead to fetching the latest pip in the func* tox targets, see
# https://stackoverflow.com/a/38133283
requires = pip < 20.3
virtualenv < 20.0
# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci
minversion = 3.2.0
[testenv]
setenv = VIRTUAL_ENV={envdir}

View File

@@ -11,3 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest.mock as mock
sys.modules['zaza.utilities.maas'] = mock.MagicMock()

View File

@@ -118,7 +118,7 @@ class TestCephUtils(ut_utils.BaseTestCase):
model_name='amodel')
def test_pools_from_broker_req(self):
self.patch_object(ceph_utils.zaza_juju, 'get_relation_from_unit')
self.patch_object(ceph_utils.juju_utils, 'get_relation_from_unit')
self.get_relation_from_unit.return_value = {
'broker_req': (
'{"api-version": 1, "ops": ['

View File

@@ -17,6 +17,8 @@ import datetime
import io
import mock
import subprocess
import sys
import unittest
import tenacity
import unit_tests.utils as ut_utils
@@ -191,6 +193,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.patch_object(openstack_utils, 'get_application_config_option')
self.patch_object(openstack_utils, 'get_keystone_ip')
self.patch_object(openstack_utils, "get_current_os_versions")
self.patch_object(openstack_utils, "get_remote_ca_cert_file")
self.patch_object(openstack_utils.juju_utils, 'leader_get')
if tls_relation:
self.patch_object(openstack_utils.model, "scp_from_unit")
@@ -204,6 +207,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.get_relation_id.return_value = None
self.get_application_config_option.return_value = None
self.leader_get.return_value = 'openstack'
self.get_remote_ca_cert_file.return_value = None
if tls_relation or ssl_cert:
port = 35357
transport = 'https'
@@ -245,7 +249,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
'API_VERSION': 3,
}
if tls_relation:
expect['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT
self.get_remote_ca_cert_file.return_value = '/tmp/a.cert'
expect['OS_CACERT'] = '/tmp/a.cert'
self.assertEqual(openstack_utils.get_overcloud_auth(),
expect)
@@ -908,6 +913,14 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
release_comp = xenial_queens > xenial_mitaka
self.assertTrue(release_comp)
# Check specifying an application
self._get_os_rel_pair.reset_mock()
self._get_os_rel_pair.return_value = 'xenial_mitaka'
expected = 4
result = openstack_utils.get_os_release(application='myapp')
self.assertEqual(expected, result)
self._get_os_rel_pair.assert_called_once_with(application='myapp')
def test_get_keystone_api_version(self):
self.patch_object(openstack_utils, "get_current_os_versions")
self.patch_object(openstack_utils, "get_application_config_option")
@@ -1254,34 +1267,230 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.get_application.side_effect = KeyError
self.assertFalse(openstack_utils.ngw_present())
def test_configure_gateway_ext_port(self):
# FIXME: this is not a complete unit test for the function; since none
# existed at all, I'm adding this to test one piece of behavior, and we'll
# add more as we go.
def test_get_charm_networking_data(self):
self.patch_object(openstack_utils, 'deprecated_external_networking')
self.patch_object(openstack_utils, 'dvr_enabled')
self.patch_object(openstack_utils, 'ovn_present')
self.patch_object(openstack_utils, 'ngw_present')
self.patch_object(openstack_utils, 'get_ovs_uuids')
self.patch_object(openstack_utils, 'get_gateway_uuids')
self.patch_object(openstack_utils, 'get_admin_net')
self.patch_object(openstack_utils, 'get_ovn_uuids')
self.patch_object(openstack_utils.model, 'get_application')
self.dvr_enabled.return_value = False
self.ovn_present.return_value = False
self.ngw_present.return_value = True
self.get_admin_net.return_value = {'id': 'fakeid'}
self.ngw_present.return_value = False
self.get_ovs_uuids.return_value = []
self.get_gateway_uuids.return_value = []
self.get_ovn_uuids.return_value = []
self.get_application.side_effect = KeyError
novaclient = mock.MagicMock()
neutronclient = mock.MagicMock()
def _fake_empty_generator(empty=True):
if empty:
return
yield
self.get_gateway_uuids.side_effect = _fake_empty_generator
with self.assertRaises(RuntimeError):
openstack_utils.configure_gateway_ext_port(
novaclient, neutronclient)
# provide a uuid and check that we don't raise RuntimeError
self.get_gateway_uuids.side_effect = ['fake-uuid']
openstack_utils.configure_gateway_ext_port(
novaclient, neutronclient)
openstack_utils.get_charm_networking_data()
self.ngw_present.return_value = True
self.assertEquals(
openstack_utils.get_charm_networking_data(),
openstack_utils.CharmedOpenStackNetworkingData(
openstack_utils.OpenStackNetworkingTopology.ML2_OVS,
['neutron-gateway'],
mock.ANY,
'data-port',
{}))
self.dvr_enabled.return_value = True
self.assertEquals(
openstack_utils.get_charm_networking_data(),
openstack_utils.CharmedOpenStackNetworkingData(
openstack_utils.OpenStackNetworkingTopology.ML2_OVS_DVR,
['neutron-gateway', 'neutron-openvswitch'],
mock.ANY,
'data-port',
{}))
self.ngw_present.return_value = False
self.assertEquals(
openstack_utils.get_charm_networking_data(),
openstack_utils.CharmedOpenStackNetworkingData(
openstack_utils.OpenStackNetworkingTopology.ML2_OVS_DVR_SNAT,
['neutron-openvswitch'],
mock.ANY,
'data-port',
{}))
self.dvr_enabled.return_value = False
self.ovn_present.return_value = True
self.assertEquals(
openstack_utils.get_charm_networking_data(),
openstack_utils.CharmedOpenStackNetworkingData(
openstack_utils.OpenStackNetworkingTopology.ML2_OVN,
['ovn-chassis'],
mock.ANY,
'bridge-interface-mappings',
{'ovn-bridge-mappings': 'physnet1:br-ex'}))
self.get_application.side_effect = None
self.assertEquals(
openstack_utils.get_charm_networking_data(),
openstack_utils.CharmedOpenStackNetworkingData(
openstack_utils.OpenStackNetworkingTopology.ML2_OVN,
['ovn-chassis', 'ovn-dedicated-chassis'],
mock.ANY,
'bridge-interface-mappings',
{'ovn-bridge-mappings': 'physnet1:br-ex'}))
def test_get_cacert_absolute_path(self):
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir')
self.get_tmpdir.return_value = '/tmp/default'
self.assertEqual(
openstack_utils.get_cacert_absolute_path('filename'),
'/tmp/default/filename')
def test_get_cacert(self):
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir')
self.get_tmpdir.return_value = '/tmp/default'
self.patch_object(openstack_utils.os.path, 'exists')
results = {
'/tmp/default/vault_juju_ca_cert.crt': True}
self.exists.side_effect = lambda x: results[x]
self.assertEqual(
openstack_utils.get_cacert(),
'/tmp/default/vault_juju_ca_cert.crt')
results = {
'/tmp/default/vault_juju_ca_cert.crt': False,
'/tmp/default/keystone_juju_ca_cert.crt': True}
self.assertEqual(
openstack_utils.get_cacert(),
'/tmp/default/keystone_juju_ca_cert.crt')
results = {
'/tmp/default/vault_juju_ca_cert.crt': False,
'/tmp/default/keystone_juju_ca_cert.crt': False}
self.assertIsNone(openstack_utils.get_cacert())
def test_get_remote_ca_cert_file(self):
self.patch_object(openstack_utils.model, 'get_first_unit_name')
self.patch_object(
openstack_utils,
'_get_remote_ca_cert_file_candidates')
self.patch_object(openstack_utils.model, 'scp_from_unit')
self.patch_object(openstack_utils.os.path, 'exists')
self.patch_object(openstack_utils.shutil, 'move')
self.patch_object(openstack_utils.os, 'chmod')
self.patch_object(openstack_utils.tempfile, 'NamedTemporaryFile')
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir')
self.get_tmpdir.return_value = '/tmp/default'
enter_mock = mock.MagicMock()
enter_mock.__enter__.return_value.name = 'tempfilename'
self.NamedTemporaryFile.return_value = enter_mock
self.get_first_unit_name.return_value = 'neutron-api/0'
self._get_remote_ca_cert_file_candidates.return_value = [
'/tmp/ca1.cert']
self.exists.return_value = True
openstack_utils.get_remote_ca_cert_file('neutron-api')
self.scp_from_unit.assert_called_once_with(
'neutron-api/0',
'/tmp/ca1.cert',
'tempfilename')
self.chmod.assert_called_once_with('/tmp/default/ca1.cert', 0o644)
self.move.assert_called_once_with(
'tempfilename', '/tmp/default/ca1.cert')
class TestAsyncOpenstackUtils(ut_utils.AioTestCase):
def setUp(self):
super(TestAsyncOpenstackUtils, self).setUp()
if sys.version_info < (3, 6, 0):
raise unittest.SkipTest("Can't AsyncMock in py35")
model_mock = mock.MagicMock()
test_mock = mock.MagicMock()
class AsyncContextManagerMock(test_mock):
async def __aenter__(self):
return self
async def __aexit__(self, *args):
pass
self.model_mock = model_mock
self.patch_object(openstack_utils.zaza.model, "async_block_until")
async def _block_until(f, timeout):
# Store the result of the call to _check_ca_present to validate
# tests
self.result = await f()
self.async_block_until.side_effect = _block_until
self.patch('zaza.model.run_in_model', name='_run_in_model')
self._run_in_model.return_value = AsyncContextManagerMock
self._run_in_model().__aenter__.return_value = self.model_mock
async def test_async_block_until_ca_exists(self):
def _get_action_output(stdout, code, stderr=None):
stderr = stderr or ''
action = mock.MagicMock()
action.data = {
'results': {
'Code': code,
'Stderr': stderr,
'Stdout': stdout}}
return action
results = {
'/tmp/missing.cert': _get_action_output(
'',
'1',
'cat: /tmp/missing.cert: No such file or directory'),
'/tmp/good.cert': _get_action_output('CERTIFICATE', '0')}
async def _run(command, timeout=None):
return results[command.split()[-1]]
self.unit1 = mock.MagicMock()
self.unit2 = mock.MagicMock()
self.unit2.run.side_effect = _run
self.unit1.run.side_effect = _run
self.units = [self.unit1, self.unit2]
_units = mock.MagicMock()
_units.units = self.units
self.model_mock.applications = {
'keystone': _units
}
self.patch_object(
openstack_utils,
"_async_get_remote_ca_cert_file_candidates")
# Test a missing cert then a good cert.
self._async_get_remote_ca_cert_file_candidates.return_value = [
'/tmp/missing.cert',
'/tmp/good.cert']
await openstack_utils.async_block_until_ca_exists(
'keystone',
'CERTIFICATE')
self.assertTrue(self.result)
# Test a single missing
self._async_get_remote_ca_cert_file_candidates.return_value = [
'/tmp/missing.cert']
await openstack_utils.async_block_until_ca_exists(
'keystone',
'CERTIFICATE')
self.assertFalse(self.result)
async def test__async_get_remote_ca_cert_file_candidates(self):
self.patch_object(openstack_utils.zaza.model, "async_get_relation_id")
rel_id_out = {
}
def _get_relation_id(app, cert_app, model_name, remote_interface_name):
return rel_id_out[cert_app]
self.async_get_relation_id.side_effect = _get_relation_id
rel_id_out['vault'] = 'certs:1'
r = await openstack_utils._async_get_remote_ca_cert_file_candidates(
'neutron-api', 'mymodel')
self.assertEqual(
r,
['/usr/local/share/ca-certificates/vault_juju_ca_cert.crt',
'/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'])
rel_id_out['vault'] = None
r = await openstack_utils._async_get_remote_ca_cert_file_candidates(
'neutron-api', 'mymodel')
self.assertEqual(
r,
['/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'])

View File

@@ -44,25 +44,37 @@ class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase):
self.patch_object(
openstack_upgrade.zaza.model,
"get_application_config")
self.patch_object(
openstack_upgrade.zaza.model,
"block_until_all_units_idle")
self.patch_object(
openstack_upgrade,
"block_until_mysql_innodb_cluster_has_rw")
def _get_application_config(app, model_name=None):
app_config = {
'ceph-mon': {'verbose': True, 'source': 'old-src'},
'neutron-openvswitch': {'verbose': True},
'ntp': {'verbose': True},
'percona-cluster': {'verbose': True, 'source': 'old-src'},
'ceph-mon': {'verbose': {'value': True},
'source': {'value': 'old-src'}},
'neutron-openvswitch': {'verbose': {'value': True}},
'ntp': {'verbose': {'value': True}},
'percona-cluster': {'verbose': {'value': True},
'source': {'value': 'old-src'}},
'cinder': {
'verbose': True,
'openstack-origin': 'old-src',
'action-managed-upgrade': False},
'verbose': {'value': True},
'openstack-origin': {'value': 'old-src'},
'action-managed-upgrade': {'value': False}},
'neutron-api': {
'verbose': True,
'openstack-origin': 'old-src',
'action-managed-upgrade': False},
'verbose': {'value': True},
'openstack-origin': {'value': 'old-src'},
'action-managed-upgrade': {'value': False}},
'nova-compute': {
'verbose': True,
'openstack-origin': 'old-src',
'action-managed-upgrade': False},
'verbose': {'value': True},
'openstack-origin': {'value': 'old-src'},
'action-managed-upgrade': {'value': False}},
'mysql-innodb-cluster': {
'verbose': {'value': True},
'source': {'value': 'old-src'},
'action-managed-upgrade': {'value': True}},
}
return app_config[app]
self.get_application_config.side_effect = _get_application_config
@@ -74,6 +86,10 @@ class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase):
'subordinate-to': 'nova-compute'},
'ntp': { # Filter as it has no source option
'charm': 'cs:ntp'},
'mysql-innodb-cluster': {
'charm': 'cs:mysql-innodb-cluster',
'units': {
'mysql-innodb-cluster/0': {}}},
'nova-compute': {
'charm': 'cs:nova-compute',
'units': {
@@ -115,7 +131,7 @@ class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase):
model_name=None,
raise_on_failure=True)
def test_action_upgrade_group(self):
def test_action_upgrade_apps(self):
self.patch_object(openstack_upgrade, "pause_units")
self.patch_object(openstack_upgrade, "action_unit_upgrade")
self.patch_object(openstack_upgrade, "resume_units")
@@ -127,7 +143,7 @@ class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase):
'nova-compute': [mock_nova_compute_0],
'cinder': [mock_cinder_1]}
self.get_units.side_effect = lambda app, model_name: units[app]
openstack_upgrade.action_upgrade_group(['nova-compute', 'cinder'])
openstack_upgrade.action_upgrade_apps(['nova-compute', 'cinder'])
pause_calls = [
mock.call(['cinder-hacluster/0'], model_name=None),
mock.call(['nova-compute/0', 'cinder/1'], model_name=None)]
@@ -142,6 +158,30 @@ class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase):
mock.call(['cinder-hacluster/0'], model_name=None)]
self.resume_units.assert_has_calls(resume_calls, any_order=False)
def test_action_upgrade_apps_mysql_innodb_cluster(self):
"""Verify that mysql-innodb-cluster is settled before complete."""
self.patch_object(openstack_upgrade, "pause_units")
self.patch_object(openstack_upgrade, "action_unit_upgrade")
self.patch_object(openstack_upgrade, "resume_units")
mock_mysql_innodb_cluster_0 = mock.MagicMock()
mock_mysql_innodb_cluster_0.entity_id = 'mysql-innodb-cluster/0'
units = {'mysql-innodb-cluster': [mock_mysql_innodb_cluster_0]}
self.get_units.side_effect = lambda app, model_name: units[app]
openstack_upgrade.action_upgrade_apps(['mysql-innodb-cluster'])
pause_calls = [
mock.call(['mysql-innodb-cluster/0'], model_name=None)]
self.pause_units.assert_has_calls(pause_calls, any_order=False)
action_unit_upgrade_calls = [
mock.call(['mysql-innodb-cluster/0'], model_name=None)]
self.action_unit_upgrade.assert_has_calls(
action_unit_upgrade_calls,
any_order=False)
resume_calls = [
mock.call(['mysql-innodb-cluster/0'], model_name=None)]
self.resume_units.assert_has_calls(resume_calls, any_order=False)
self.block_until_mysql_innodb_cluster_has_rw.assert_called_once_with(
None)
def test_set_upgrade_application_config(self):
openstack_upgrade.set_upgrade_application_config(
['neutron-api', 'cinder'],
@@ -177,17 +217,23 @@ class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase):
self.assertFalse(
openstack_upgrade.is_action_upgradable('percona-cluster'))
def test_is_already_upgraded(self):
self.assertTrue(
openstack_upgrade.is_already_upgraded('cinder', 'old-src'))
self.assertFalse(
openstack_upgrade.is_already_upgraded('cinder', 'new-src'))
def test_run_action_upgrade(self):
self.patch_object(openstack_upgrade, "set_upgrade_application_config")
self.patch_object(openstack_upgrade, "action_upgrade_group")
openstack_upgrade.run_action_upgrade(
self.patch_object(openstack_upgrade, "action_upgrade_apps")
openstack_upgrade.run_action_upgrades(
['cinder', 'neutron-api'],
'new-src')
self.set_upgrade_application_config.assert_called_once_with(
['cinder', 'neutron-api'],
'new-src',
model_name=None)
self.action_upgrade_group.assert_called_once_with(
self.action_upgrade_apps.assert_called_once_with(
['cinder', 'neutron-api'],
model_name=None)
@@ -196,7 +242,7 @@ class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase):
self.patch_object(
openstack_upgrade.zaza.model,
'block_until_all_units_idle')
openstack_upgrade.run_all_in_one_upgrade(
openstack_upgrade.run_all_in_one_upgrades(
['percona-cluster'],
'new-src')
self.set_upgrade_application_config.assert_called_once_with(
@@ -207,34 +253,36 @@ class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase):
self.block_until_all_units_idle.assert_called_once_with()
def test_run_upgrade(self):
self.patch_object(openstack_upgrade, "run_all_in_one_upgrade")
self.patch_object(openstack_upgrade, "run_action_upgrade")
openstack_upgrade.run_upgrade(
self.patch_object(openstack_upgrade, "run_all_in_one_upgrades")
self.patch_object(openstack_upgrade, "run_action_upgrades")
openstack_upgrade.run_upgrade_on_apps(
['cinder', 'neutron-api', 'ceph-mon'],
'new-src')
self.run_all_in_one_upgrade.assert_called_once_with(
self.run_all_in_one_upgrades.assert_called_once_with(
['ceph-mon'],
'new-src',
model_name=None)
self.run_action_upgrade.assert_called_once_with(
self.run_action_upgrades.assert_called_once_with(
['cinder', 'neutron-api'],
'new-src',
model_name=None)
def test_run_upgrade_tests(self):
self.patch_object(openstack_upgrade, "run_upgrade")
self.patch_object(openstack_upgrade, "run_upgrade_on_apps")
self.patch_object(openstack_upgrade, "get_upgrade_groups")
self.get_upgrade_groups.return_value = {
'Compute': ['nova-compute'],
'Control Plane': ['cinder', 'neutron-api'],
'Core Identity': ['keystone'],
'Storage': ['ceph-mon'],
'sweep_up': ['designate']}
self.get_upgrade_groups.return_value = [
('Compute', ['nova-compute']),
('Control Plane', ['cinder', 'neutron-api']),
('Core Identity', ['keystone']),
('Storage', ['ceph-mon']),
('sweep_up', ['designate'])]
openstack_upgrade.run_upgrade_tests('new-src', model_name=None)
run_upgrade_calls = [
mock.call(['nova-compute'], 'new-src', model_name=None),
mock.call(['cinder', 'neutron-api'], 'new-src', model_name=None),
mock.call(['keystone'], 'new-src', model_name=None),
mock.call(['ceph-mon'], 'new-src', model_name=None),
mock.call(['cinder', 'neutron-api'], 'new-src', model_name=None),
mock.call(['nova-compute'], 'new-src', model_name=None),
mock.call(['designate'], 'new-src', model_name=None)]
self.run_upgrade.assert_has_calls(run_upgrade_calls, any_order=False)
mock.call(['designate'], 'new-src', model_name=None),
]
self.run_upgrade_on_apps.assert_has_calls(
run_upgrade_calls, any_order=False)

View File

@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import mock
import sys
import unittest
@@ -139,28 +138,7 @@ class Test_ParallelSeriesUpgradeSync(ut_utils.BaseTestCase):
self.assertEqual(expected, config)
class AioTestCase(ut_utils.BaseTestCase):
def __init__(self, methodName='runTest', loop=None):
self.loop = loop or asyncio.get_event_loop()
self._function_cache = {}
super(AioTestCase, self).__init__(methodName=methodName)
def coroutine_function_decorator(self, func):
def wrapper(*args, **kw):
return self.loop.run_until_complete(func(*args, **kw))
return wrapper
def __getattribute__(self, item):
attr = object.__getattribute__(self, item)
if asyncio.iscoroutinefunction(attr) and item.startswith('test_'):
if item not in self._function_cache:
self._function_cache[item] = (
self.coroutine_function_decorator(attr))
return self._function_cache[item]
return attr
class TestParallelSeriesUpgrade(AioTestCase):
class TestParallelSeriesUpgrade(ut_utils.AioTestCase):
def setUp(self):
super(TestParallelSeriesUpgrade, self).setUp()
if sys.version_info < (3, 6, 0):
@@ -182,6 +160,8 @@ class TestParallelSeriesUpgrade(AioTestCase):
self.model.async_wait_for_unit_idle = mock.AsyncMock()
self.async_run_on_machine = mock.AsyncMock()
self.model.async_run_on_machine = self.async_run_on_machine
self.model.async_block_until_units_on_machine_are_idle = \
mock.AsyncMock()
@mock.patch.object(upgrade_utils.cl_utils, 'get_class')
async def test_run_post_application_upgrade_functions(
@@ -492,7 +472,13 @@ class TestParallelSeriesUpgrade(AioTestCase):
mock_remove_confdef_file.assert_called_once_with('1')
mock_add_confdef_file.assert_called_once_with('1')
async def test_maybe_pause_things_primary(self):
@mock.patch("asyncio.gather")
async def test_maybe_pause_things_primary(self, mock_gather):
async def _gather(*args):
for f in args:
await f
mock_gather.side_effect = _gather
await upgrade_utils.maybe_pause_things(
FAKE_STATUS,
['app/1', 'app/2'],
@@ -503,7 +489,13 @@ class TestParallelSeriesUpgrade(AioTestCase):
mock.call('app/2', "pause", action_params={}),
])
async def test_maybe_pause_things_subordinates(self):
@mock.patch("asyncio.gather")
async def test_maybe_pause_things_subordinates(self, mock_gather):
async def _gather(*args):
for f in args:
await f
mock_gather.side_effect = _gather
await upgrade_utils.maybe_pause_things(
FAKE_STATUS,
['app/1', 'app/2'],
@@ -514,7 +506,13 @@ class TestParallelSeriesUpgrade(AioTestCase):
mock.call('app-hacluster/2', "pause", action_params={}),
])
async def test_maybe_pause_things_all(self):
@mock.patch("asyncio.gather")
async def test_maybe_pause_things_all(self, mock_gather):
async def _gather(*args):
for f in args:
await f
mock_gather.side_effect = _gather
await upgrade_utils.maybe_pause_things(
FAKE_STATUS,
['app/1', 'app/2'],

View File

@@ -19,6 +19,7 @@
"""Module to provide helper for writing unit tests."""
import asyncio
import contextlib
import io
import mock
@@ -96,3 +97,24 @@ class BaseTestCase(unittest.TestCase):
started.return_value = return_value
self._patches_start[name] = started
setattr(self, name, started)
class AioTestCase(BaseTestCase):
def __init__(self, methodName='runTest', loop=None):
self.loop = loop or asyncio.get_event_loop()
self._function_cache = {}
super(AioTestCase, self).__init__(methodName=methodName)
def coroutine_function_decorator(self, func):
def wrapper(*args, **kw):
return self.loop.run_until_complete(func(*args, **kw))
return wrapper
def __getattribute__(self, item):
attr = object.__getattribute__(self, item)
if asyncio.iscoroutinefunction(attr) and item.startswith('test_'):
if item not in self._function_cache:
self._function_cache[item] = (
self.coroutine_function_decorator(attr))
return self._function_cache[item]
return attr

View File

@@ -64,26 +64,25 @@ class CephISCSIGatewayTest(test_utils.BaseCharmTest):
:rtype: Dict
"""
gw_units = zaza.model.get_units('ceph-iscsi')
primary_gw = gw_units[0]
secondary_gw = gw_units[1]
host_names = generic_utils.get_unit_hostnames(gw_units, fqdn=True)
client_entity_ids = [
u.entity_id for u in zaza.model.get_units('ubuntu')]
ctxt = {
'client_entity_ids': sorted(client_entity_ids),
'gw_iqn': self.GW_IQN,
'gw1_ip': primary_gw.public_address,
'gw1_hostname': host_names[primary_gw.entity_id],
'gw1_entity_id': primary_gw.entity_id,
'gw2_ip': secondary_gw.public_address,
'gw2_hostname': host_names[secondary_gw.entity_id],
'gw2_entity_id': secondary_gw.entity_id,
'chap_creds': 'username={chap_username} password={chap_password}',
'gwcli_gw_dir': '/iscsi-targets/{gw_iqn}/gateways',
'gwcli_hosts_dir': '/iscsi-targets/{gw_iqn}/hosts',
'gwcli_disk_dir': '/disks',
'gwcli_client_dir': '{gwcli_hosts_dir}/{client_initiatorname}',
}
ctxt['gateway_units'] = [
{
'entity_id': u.entity_id,
'ip': u.public_address,
'hostname': host_names[u.entity_id]}
for u in zaza.model.get_units('ceph-iscsi')]
ctxt['gw_ip'] = sorted([g['ip'] for g in ctxt['gateway_units']])[0]
return ctxt
def run_commands(self, unit_name, commands, ctxt):
@@ -116,9 +115,8 @@ class CephISCSIGatewayTest(test_utils.BaseCharmTest):
'ceph-iscsi',
'create-target',
action_params={
'gateway-units': '{} {}'.format(
ctxt['gw1_entity_id'],
ctxt['gw2_entity_id']),
'gateway-units': ' '.join([g['entity_id']
for g in ctxt['gateway_units']]),
'iqn': self.GW_IQN,
'rbd-pool-name': ctxt.get('pool_name', ''),
'ec-rbd-metadata-pool': ctxt.get('ec_meta_pool_name', ''),
@@ -139,7 +137,7 @@ class CephISCSIGatewayTest(test_utils.BaseCharmTest):
base_op_cmd = ('iscsiadm --mode node --targetname {gw_iqn} '
'--op=update ').format(**ctxt)
setup_cmds = [
'iscsiadm -m discovery -t st -p {gw1_ip}',
'iscsiadm -m discovery -t st -p {gw_ip}',
base_op_cmd + '-n node.session.auth.authmethod -v CHAP',
base_op_cmd + '-n node.session.auth.username -v {chap_username}',
base_op_cmd + '-n node.session.auth.password -v {chap_password}',

View File

@@ -136,8 +136,14 @@ class ServiceTest(unittest.TestCase):
def setUp(self):
"""Run test setup."""
# Note: This counter reset is needed because ceph-osd service is
# limited to 3 restarts per 30 mins which is insufficient
# Skip 'service' action tests on systems without systemd
result = zaza_model.run_on_unit(self.TESTED_UNIT, 'which systemctl')
if not result['Stdout']:
raise unittest.SkipTest("'service' action is not supported on "
"systems without 'systemd'. Skipping "
"tests.")
# Note(mkalcok): This counter reset is needed because ceph-osd service
# is limited to 3 restarts per 30 mins which is insufficient
# when running functional tests for 'service' action. This
# limitation is defined in /lib/systemd/system/ceph-osd@.service
# in section [Service] with options 'StartLimitInterval' and
@@ -150,8 +156,8 @@ class ServiceTest(unittest.TestCase):
This ensures that the environment is ready for the next tests.
"""
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'service',
action_params={'start': 'all'},
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
action_params={'osds': 'all'},
raise_on_failure=True)
@property
@@ -180,16 +186,16 @@ class ServiceTest(unittest.TestCase):
logging.info("Running 'service stop=all' action on {} "
"unit".format(self.TESTED_UNIT))
zaza_model.run_action_on_units([self.TESTED_UNIT], 'service',
action_params={'stop': 'all'})
zaza_model.run_action_on_units([self.TESTED_UNIT], 'stop',
action_params={'osds': 'all'})
wait_for_service(unit_name=self.TESTED_UNIT,
services=service_list,
target_status='stopped')
logging.info("Running 'service start=all' action on {} "
"unit".format(self.TESTED_UNIT))
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'service',
action_params={'start': 'all'})
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
action_params={'osds': 'all'})
wait_for_service(unit_name=self.TESTED_UNIT,
services=service_list,
target_status='running')
@@ -202,16 +208,16 @@ class ServiceTest(unittest.TestCase):
logging.info("Running 'service stop={}' action on {} "
"unit".format(action_params, self.TESTED_UNIT))
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'service',
action_params={'stop': action_params})
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'stop',
action_params={'osds': action_params})
wait_for_service(unit_name=self.TESTED_UNIT,
services=service_list,
target_status='stopped')
logging.info("Running 'service start={}' action on {} "
"unit".format(action_params, self.TESTED_UNIT))
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'service',
action_params={'start': action_params})
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
action_params={'osds': action_params})
wait_for_service(unit_name=self.TESTED_UNIT,
services=service_list,
target_status='running')
@@ -230,8 +236,8 @@ class ServiceTest(unittest.TestCase):
logging.info("Running 'service stop={} on {} "
"unit".format(to_stop.id, self.TESTED_UNIT))
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'service',
action_params={'stop': to_stop.id})
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'stop',
action_params={'osds': to_stop.id})
wait_for_service(unit_name=self.TESTED_UNIT,
services=[to_stop.name, ],
@@ -263,8 +269,8 @@ class ServiceTest(unittest.TestCase):
logging.info("Running 'service start={} on {} "
"unit".format(to_start.id, self.TESTED_UNIT))
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'service',
action_params={'start': to_start.id})
zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
action_params={'osds': to_start.id})
wait_for_service(unit_name=self.TESTED_UNIT,
services=[to_start.name, ],

View File

@@ -33,7 +33,7 @@ import zaza.model as zaza_model
import zaza.openstack.utilities.ceph as zaza_ceph
import zaza.openstack.utilities.exceptions as zaza_exceptions
import zaza.openstack.utilities.generic as zaza_utils
import zaza.openstack.utilities.juju as zaza_juju
import zaza.utilities.juju as juju_utils
import zaza.openstack.utilities.openstack as zaza_openstack
@@ -124,7 +124,7 @@ class CephRelationTest(test_utils.OpenStackBaseTest):
relation_name = 'osd'
remote_unit = zaza_model.get_unit_from_name(remote_unit_name)
remote_ip = remote_unit.public_address
relation = zaza_juju.get_relation_from_unit(
relation = juju_utils.get_relation_from_unit(
unit_name,
remote_unit_name,
relation_name
@@ -153,7 +153,7 @@ class CephRelationTest(test_utils.OpenStackBaseTest):
'ceph-public-address': remote_ip,
'fsid': fsid,
}
relation = zaza_juju.get_relation_from_unit(
relation = juju_utils.get_relation_from_unit(
unit_name,
remote_unit_name,
relation_name
@@ -881,9 +881,13 @@ class BlueStoreCompressionCharmOperation(test_utils.BaseCharmTest):
def setUpClass(cls):
"""Perform class one time initialization."""
super(BlueStoreCompressionCharmOperation, cls).setUpClass()
release_application = 'keystone'
try:
zaza_model.get_application(release_application)
except KeyError:
release_application = 'ceph-mon'
cls.current_release = zaza_openstack.get_os_release(
zaza_openstack.get_current_os_release_pair(
application='ceph-mon'))
application=release_application)
cls.bionic_rocky = zaza_openstack.get_os_release('bionic_rocky')
def setUp(self):
@@ -976,7 +980,7 @@ class BlueStoreCompressionCharmOperation(test_utils.BaseCharmTest):
ceph_pools_detail = zaza_ceph.get_ceph_pool_details(
model_name=self.model_name)
logging.debug('AFTER: {}'.format(ceph_pools_detail))
logging.debug(zaza_juju.get_relation_from_unit(
logging.debug(juju_utils.get_relation_from_unit(
'ceph-mon', self.application_name, None,
model_name=self.model_name))
logging.info('Checking Ceph pool compression_mode after restoring '

View File

@@ -35,6 +35,7 @@ class FullCloudCharmUpgradeTest(unittest.TestCase):
"""Run setup for Charm Upgrades."""
cli_utils.setup_logging()
cls.lts = LTSGuestCreateTest()
cls.lts.setUpClass()
cls.target_charm_namespace = '~openstack-charmers-next'
def get_upgrade_url(self, charm_url):

View File

@@ -23,7 +23,7 @@ import designateclient.v1.records as records
import designateclient.v1.servers as servers
import zaza.model
import zaza.openstack.utilities.juju as zaza_juju
import zaza.utilities.juju as juju_utils
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.designate.utils as designate_utils
@@ -170,7 +170,7 @@ class DesignateAPITests(BaseDesignateTest):
reraise=True
)
def _wait_to_resolve_test_record(self):
dns_ip = zaza_juju.get_relation_from_unit(
dns_ip = juju_utils.get_relation_from_unit(
'designate/0',
'designate-bind/0',
'dns-backend'

View File

@@ -129,9 +129,9 @@ class HaclusterScalebackTest(HaclusterBaseTest):
logging.info('Waiting for model to settle')
zaza.model.block_until_unit_wl_status(other_hacluster_unit, 'active')
# NOTE(lourot): the principle application remains blocked after scaling
# back up until lp:1400481 is solved.
zaza.model.block_until_unit_wl_status(other_principle_unit, 'blocked')
# NOTE(lourot): the principle application sometimes remain blocked
# after scaling back up until lp:1400481 is solved.
# zaza.model.block_until_unit_wl_status(other_principle_unit, 'active')
zaza.model.block_until_all_units_idle()
logging.debug('OK')

View File

@@ -41,9 +41,8 @@ def wait_for_cacert(model_name=None):
:type model_name: str
"""
logging.info("Waiting for cacert")
zaza.model.block_until_file_has_contents(
zaza.openstack.utilities.openstack.block_until_ca_exists(
'keystone',
openstack_utils.KEYSTONE_REMOTE_CACERT,
'CERTIFICATE',
model_name=model_name)
zaza.model.block_until_all_units_idle(model_name=model_name)

View File

@@ -229,7 +229,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
'OS_DOMAIN_NAME': DEMO_DOMAIN,
}
if self.tls_rid:
openrc['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT
openrc['OS_CACERT'] = openstack_utils.get_cacert()
openrc['OS_AUTH_URL'] = (
openrc['OS_AUTH_URL'].replace('http', 'https'))
logging.info('keystone IP {}'.format(ip))
@@ -259,7 +259,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
"""
def _validate_token_data(openrc):
if self.tls_rid:
openrc['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT
openrc['OS_CACERT'] = openstack_utils.get_cacert()
openrc['OS_AUTH_URL'] = (
openrc['OS_AUTH_URL'].replace('http', 'https'))
logging.info('keystone IP {}'.format(ip))
@@ -407,13 +407,22 @@ class LdapTests(BaseKeystoneTest):
'ldap-password': 'crapper',
'ldap-suffix': 'dc=test,dc=com',
'domain-name': 'userdomain',
'ldap-config-flags':
{
'group_tree_dn': 'ou=groups,dc=test,dc=com',
'group_objectclass': 'posixGroup',
'group_name_attribute': 'cn',
'group_member_attribute': 'memberUid',
'group_members_are_ids': 'true',
}
}
def _find_keystone_v3_user(self, username, domain):
def _find_keystone_v3_user(self, username, domain, group=None):
"""Find a user within a specified keystone v3 domain.
:param str username: Username to search for in keystone
:param str domain: username selected from which domain
:param str group: group to search for in keystone for group membership
:return: return username if found
:rtype: Optional[str]
"""
@@ -423,9 +432,15 @@ class LdapTests(BaseKeystoneTest):
openstack_utils.get_overcloud_auth(address=ip))
client = openstack_utils.get_keystone_session_client(session)
domain_users = client.users.list(
domain=client.domains.find(name=domain).id
)
if group is None:
domain_users = client.users.list(
domain=client.domains.find(name=domain).id,
)
else:
domain_users = client.users.list(
domain=client.domains.find(name=domain).id,
group=self._find_keystone_v3_group(group, domain).id,
)
usernames = [u.name.lower() for u in domain_users]
if username.lower() in usernames:
@@ -436,6 +451,33 @@ class LdapTests(BaseKeystoneTest):
)
return None
def _find_keystone_v3_group(self, group, domain):
"""Find a group within a specified keystone v3 domain.
:param str group: Group to search for in keystone
:param str domain: group selected from which domain
:return: return group if found
:rtype: Optional[str]
"""
for ip in self.keystone_ips:
logging.info('Keystone IP {}'.format(ip))
session = openstack_utils.get_keystone_session(
openstack_utils.get_overcloud_auth(address=ip))
client = openstack_utils.get_keystone_session_client(session)
domain_groups = client.groups.list(
domain=client.domains.find(name=domain).id
)
for searched_group in domain_groups:
if searched_group.name.lower() == group.lower():
return searched_group
logging.debug(
"Group {} was not found. Returning None.".format(group)
)
return None
def test_100_keystone_ldap_users(self):
"""Validate basic functionality of keystone API with ldap."""
application_name = 'keystone-ldap'
@@ -474,6 +516,83 @@ class LdapTests(BaseKeystoneTest):
self.assertIsNotNone(
janedoe, "user 'jane doe' was unknown")
def test_101_keystone_ldap_groups(self):
"""Validate basic functionality of keystone API with ldap."""
application_name = 'keystone-ldap'
intended_cfg = self._get_ldap_config()
current_cfg, non_string_cfg = (
self.config_current_separate_non_string_type_keys(
self.non_string_type_keys, intended_cfg, application_name)
)
with self.config_change(
{},
non_string_cfg,
application_name=application_name,
reset_to_charm_default=True):
with self.config_change(
current_cfg,
intended_cfg,
application_name=application_name):
logging.info(
'Waiting for groups to become available in keystone...'
)
test_config = lifecycle_utils.get_charm_config(fatal=False)
zaza.model.wait_for_application_states(
states=test_config.get("target_deploy_status", {})
)
with self.v3_keystone_preferred():
# NOTE(arif-ali): Test fixture should have openstack and
# admin groups
openstack_group = self._find_keystone_v3_group(
'openstack', 'userdomain')
self.assertIsNotNone(
openstack_group.name, "group 'openstack' was unknown")
admin_group = self._find_keystone_v3_group(
'admin', 'userdomain')
self.assertIsNotNone(
admin_group.name, "group 'admin' was unknown")
def test_102_keystone_ldap_group_membership(self):
"""Validate basic functionality of keystone API with ldap."""
application_name = 'keystone-ldap'
intended_cfg = self._get_ldap_config()
current_cfg, non_string_cfg = (
self.config_current_separate_non_string_type_keys(
self.non_string_type_keys, intended_cfg, application_name)
)
with self.config_change(
{},
non_string_cfg,
application_name=application_name,
reset_to_charm_default=True):
with self.config_change(
current_cfg,
intended_cfg,
application_name=application_name):
logging.info(
'Waiting for groups to become available in keystone...'
)
test_config = lifecycle_utils.get_charm_config(fatal=False)
zaza.model.wait_for_application_states(
states=test_config.get("target_deploy_status", {})
)
with self.v3_keystone_preferred():
# NOTE(arif-ali): Test fixture should have openstack and
# admin groups
openstack_group = self._find_keystone_v3_user(
'john doe', 'userdomain', group='openstack')
self.assertIsNotNone(
openstack_group,
"john doe was not in group 'openstack'")
admin_group = self._find_keystone_v3_user(
'john doe', 'userdomain', group='admin')
self.assertIsNotNone(
admin_group, "'john doe' was not in group 'admin'")
class LdapExplicitCharmConfigTests(LdapTests):
"""Keystone ldap tests."""
@@ -501,9 +620,10 @@ class LdapExplicitCharmConfigTests(LdapTests):
'ldap-user-enabled-invert': False,
'ldap-user-enabled-mask': 0,
'ldap-user-enabled-default': 'True',
'ldap-group-tree-dn': 'ou=groups',
'ldap-group-tree-dn': 'ou=groups,dc=test,dc=com',
'ldap-group-objectclass': '',
'ldap-group-id-attribute': 'cn',
'ldap-group-name-attribute': 'cn',
'ldap-group-member-attribute': 'memberUid',
'ldap-group-members-are-ids': True,
'ldap-config-flags': '{group_objectclass: "posixGroup",'

View File

@@ -20,6 +20,7 @@ import tenacity
from manilaclient import client as manilaclient
import zaza.model
import zaza.openstack.charm_tests.test_utils as test_utils
@@ -35,6 +36,12 @@ class ManilaTests(test_utils.OpenStackBaseTest):
def test_manila_api(self):
"""Test that the Manila API is working."""
# The manila charm contains a 'band-aid' for Bug #1706699 which relies
# on update-status to bring up services if needed. When the tests run
# an update-status hook might not have run so services may still be
# stopped so force a hook execution.
for unit in zaza.model.get_units('manila'):
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
self.assertEqual([], self._list_shares())
@tenacity.retry(

View File

@@ -16,6 +16,8 @@
"""Encapsulate Manila Ganesha testing."""
import logging
from tenacity import Retrying, stop_after_attempt, wait_exponential
from manilaclient import client as manilaclient
@@ -87,6 +89,8 @@ packages:
# Write a file on instance_1
def verify_setup(stdin, stdout, stderr):
status = stdout.channel.recv_exit_status()
if status:
logging.info("{}".format(stderr.readlines()[0].strip()))
self.assertEqual(status, 0)
mount_path = share.export_locations[0]
@@ -119,6 +123,8 @@ packages:
def verify(stdin, stdout, stderr):
status = stdout.channel.recv_exit_status()
if status:
logging.info("{}".format(stderr.readlines()[0].strip()))
self.assertEqual(status, 0)
out = ""
for line in iter(stdout.readline, ""):

View File

@@ -19,8 +19,16 @@ import zaza.model as model
async def complete_cluster_series_upgrade():
"""Run the complete-cluster-series-upgrade action on the lead unit."""
# TODO: Make this work across either mysql or percona-cluster names
await model.async_run_action_on_leader(
'mysql',
'complete-cluster-series-upgrade',
action_params={})
# Note that some models use mysql as the application name, and other's use
# percona-cluster. Try mysql first, and if it doesn't exist, then try
# percona-cluster instead.
try:
await model.async_run_action_on_leader(
'mysql',
'complete-cluster-series-upgrade',
action_params={})
except KeyError:
await model.async_run_action_on_leader(
'percona-cluster',
'complete-cluster-series-upgrade',
action_params={})

View File

@@ -15,6 +15,7 @@
"""Setup for Neutron deployments."""
import functools
import logging
from zaza.openstack.configure import (
network,
@@ -89,12 +90,25 @@ def basic_overcloud_network(limit_gws=None):
'configure_gateway_ext_port_use_juju_wait', True)
# Handle network for OpenStack-on-OpenStack scenarios
if juju_utils.get_provider_type() == "openstack":
provider_type = juju_utils.get_provider_type()
if provider_type == "openstack":
undercloud_ks_sess = openstack_utils.get_undercloud_keystone_session()
network.setup_gateway_ext_port(network_config,
keystone_session=undercloud_ks_sess,
limit_gws=None,
limit_gws=limit_gws,
use_juju_wait=use_juju_wait)
elif provider_type == "maas":
# NOTE(fnordahl): After validation of the MAAS+Netplan Open vSwitch
# integration support, we would most likely want to add multiple modes
# of operation with MAAS.
#
# Perform charm based OVS configuration
openstack_utils.configure_charmed_openstack_on_maas(
network_config, limit_gws=limit_gws)
else:
logging.warning('Unknown Juju provider type, "{}", will not perform'
' charm network configuration.'
.format(provider_type))
# Confugre the overcloud network
network.setup_sdn(network_config, keystone_session=keystone_session)

View File

@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for configureing nova."""
"""Code for configuring nova."""
import tenacity
import zaza.openstack.utilities.openstack as openstack_utils
from zaza.openstack.utilities import (
@@ -21,6 +23,9 @@ from zaza.openstack.utilities import (
import zaza.openstack.charm_tests.nova.utils as nova_utils
@tenacity.retry(stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_exponential(
multiplier=1, min=2, max=10))
def create_flavors(nova_client=None):
"""Create basic flavors.
@@ -43,6 +48,9 @@ def create_flavors(nova_client=None):
flavorid=nova_utils.FLAVORS[flavor]['flavorid'])
@tenacity.retry(stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_exponential(
multiplier=1, min=2, max=10))
def manage_ssh_key(nova_client=None):
"""Create basic flavors.

View File

@@ -155,6 +155,29 @@ class NovaCompute(test_utils.OpenStackBaseTest):
self.assertFalse(int(run['Code']) == 0)
class NovaComputeActionTest(test_utils.OpenStackBaseTest):
"""Run nova-compute specific tests.
Add this test class for new nova-compute action
to avoid breaking older version
"""
def test_virsh_audit_action(self):
"""Test virsh-audit action."""
for unit in zaza.model.get_units('nova-compute',
model_name=self.model_name):
logging.info('Running `virsh-audit` action'
' on unit {}'.format(unit.entity_id))
action = zaza.model.run_action(
unit.entity_id,
'virsh-audit',
model_name=self.model_name,
action_params={})
if "failed" in action.data["status"]:
raise Exception(
"The action failed: {}".format(action.data["message"]))
class NovaCloudController(test_utils.OpenStackBaseTest):
"""Run nova-cloud-controller specific tests."""
@@ -472,17 +495,24 @@ class SecurityTests(test_utils.OpenStackBaseTest):
# Changes fixing the below expected failures will be made following
# this initial work to get validation in. There will be bugs targeted
# to each one and resolved independently where possible.
expected_failures = [
'is-volume-encryption-enabled',
'validate-uses-tls-for-glance',
'validate-uses-tls-for-keystone',
]
expected_passes = [
'validate-file-ownership',
'validate-file-permissions',
'validate-uses-keystone',
]
tls_checks = [
'validate-uses-tls-for-glance',
'validate-uses-tls-for-keystone',
]
if zaza.model.get_relation_id(
'nova-cloud-controller',
'vault',
remote_interface_name='certificates'):
expected_passes.extend(tls_checks)
else:
expected_failures.extend(tls_checks)
for unit in zaza.model.get_units(self.application_name,
model_name=self.model_name):
@@ -496,4 +526,4 @@ class SecurityTests(test_utils.OpenStackBaseTest):
action_params={}),
expected_passes,
expected_failures,
expected_to_pass=False)
expected_to_pass=not len(expected_failures))

View File

@@ -25,6 +25,9 @@ import zaza.openstack.charm_tests.glance.setup as glance_setup
import zaza.openstack.utilities.openstack as openstack
import zaza.openstack.configure.guest
import zaza.openstack.charm_tests.nova.setup as nova_setup
import zaza.openstack.charm_tests.nova.utils as nova_utils
def ensure_lts_images():
"""Ensure that bionic and focal images are available for the tests."""
@@ -51,13 +54,32 @@ def add_amphora_image(image_url=None):
def configure_octavia():
"""Do mandatory post deployment configuration of Octavia."""
# Tell Octavia charm it is safe to create cloud resources
logging.info('Running `configure-resources` action on Octavia leader unit')
zaza.model.run_action_on_leader(
'octavia',
'configure-resources',
action_params={})
"""Do post deployment configuration and initialization of Octavia.
Certificates for the private Octavia worker <-> Amphorae communication must
be generated and set trough charm configuration.
The optional SSH configuration options are set to enable debug and log
collection from Amphorae, we will use the same keypair as Zaza uses for
instance creation.
The `configure-resources` action must be run to have the charm create
in-cloud resources such as management network and associated ports and
security groups.
"""
# Set up Nova client to create/retrieve keypair for Amphora debug purposes.
#
# We reuse the Nova setup code for this and in most cases the test
# declaration will already defined that the Nova manage_ssh_key setup
# helper to run before we get here. Re-run here to make sure this setup
# function can be used separately, manage_ssh_key is idempotent.
keystone_session = openstack.get_overcloud_keystone_session()
nova_client = openstack.get_nova_session_client(
keystone_session)
nova_setup.manage_ssh_key(nova_client)
ssh_public_key = openstack.get_public_key(
nova_client, nova_utils.KEYPAIR_NAME)
# Generate certificates for controller/load balancer instance communication
(issuing_cakey, issuing_cacert) = cert.generate_cert(
'OSCI Zaza Issuer',
@@ -71,7 +93,7 @@ def configure_octavia():
issuer_name='OSCI Zaza Octavia Controller',
signing_key=controller_cakey)
controller_bundle = controller_cert + controller_key
cert_config = {
charm_config = {
'lb-mgmt-issuing-cacert': base64.b64encode(
issuing_cacert).decode('utf-8'),
'lb-mgmt-issuing-ca-private-key': base64.b64encode(
@@ -81,6 +103,9 @@ def configure_octavia():
controller_cacert).decode('utf-8'),
'lb-mgmt-controller-cert': base64.b64encode(
controller_bundle).decode('utf-8'),
'amp-ssh-key-name': 'octavia',
'amp-ssh-pub-key': base64.b64encode(
bytes(ssh_public_key, 'utf-8')).decode('utf-8'),
}
logging.info('Configuring certificates for mandatory Octavia '
'client/server authentication '
@@ -93,10 +118,17 @@ def configure_octavia():
_singleton = zaza.openstack.charm_tests.test_utils.OpenStackBaseTest()
_singleton.setUpClass(application_name='octavia')
with _singleton.config_change(cert_config, cert_config):
with _singleton.config_change(charm_config, charm_config):
# wait for configuration to be applied then return
pass
# Tell Octavia charm it is safe to create cloud resources
logging.info('Running `configure-resources` action on Octavia leader unit')
zaza.model.run_action_on_leader(
'octavia',
'configure-resources',
action_params={})
def centralized_fip_network():
"""Create network with centralized router for connecting lb and fips.

View File

@@ -178,6 +178,14 @@ class LBAASv2Test(test_utils.OpenStackBaseTest):
for provider in octavia_client.provider_list().get('providers', [])
if provider['name'] != 'octavia' # alias for `amphora`, skip
}
if (openstack_utils.get_os_release() in [
openstack_utils.get_os_release('focal_victoria'),
openstack_utils.get_os_release('groovy_victoria')]):
logging.info("Skipping tests of ovn backed lb (Bug #1896603)")
try:
del providers['ovn']
except KeyError:
pass
return providers
def _create_lb_resources(self, octavia_client, provider, vip_subnet_id,
@@ -280,7 +288,13 @@ class LBAASv2Test(test_utils.OpenStackBaseTest):
lambda x: octavia_client.member_show(
pool_id=pool_id, member_id=x),
member_id,
operating_status='ONLINE' if monitor else '')
operating_status='')
# Temporarily disable this check until we figure out why
# operational_status sometimes does not become 'ONLINE'
# while the member does indeed work and the subsequent
# retrieval of payload through loadbalancer is successful
# ref LP: #1896729.
# operating_status='ONLINE' if monitor else '')
logging.info(resp)
return lb

View File

@@ -67,7 +67,6 @@ def _login(dashboard_url, domain, username, password, cafile=None):
# start session, get csrftoken
client = requests.session()
client.get(auth_url, verify=cafile)
if 'csrftoken' in client.cookies:
csrftoken = client.cookies['csrftoken']
else:
@@ -163,7 +162,60 @@ def _do_request(request, cafile=None):
return urllib.request.urlopen(request, cafile=cafile)
class OpenStackDashboardTests(test_utils.OpenStackBaseTest):
class OpenStackDashboardBase():
"""Mixin for interacting with Horizon."""
def get_base_url(self):
"""Return the base url for http(s) requests.
:returns: URL
:rtype: str
"""
vip = (zaza_model.get_application_config(self.application_name)
.get("vip").get("value"))
if vip:
ip = vip
else:
unit = zaza_model.get_unit_from_name(
zaza_model.get_lead_unit_name(self.application_name))
ip = unit.public_address
logging.debug("Dashboard ip is:{}".format(ip))
scheme = 'http'
if self.use_https:
scheme = 'https'
url = '{}://{}'.format(scheme, ip)
return url
def get_horizon_url(self):
"""Return the url for acccessing horizon.
:returns: Horizon URL
:rtype: str
"""
url = '{}/horizon'.format(self.get_base_url())
logging.info("Horizon URL is: {}".format(url))
return url
@property
def use_https(self):
"""Whether dashboard is using https.
:returns: Whether dashboard is using https
:rtype: boolean
"""
use_https = False
vault_relation = zaza_model.get_relation_id(
self.application,
'vault',
remote_interface_name='certificates')
if vault_relation:
use_https = True
return use_https
class OpenStackDashboardTests(test_utils.OpenStackBaseTest,
OpenStackDashboardBase):
"""Encapsulate openstack dashboard charm tests."""
@classmethod
@@ -171,13 +223,6 @@ class OpenStackDashboardTests(test_utils.OpenStackBaseTest):
"""Run class setup for running openstack dashboard charm tests."""
super(OpenStackDashboardTests, cls).setUpClass()
cls.application = 'openstack-dashboard'
cls.use_https = False
vault_relation = zaza_model.get_relation_id(
cls.application,
'vault',
remote_interface_name='certificates')
if vault_relation:
cls.use_https = True
def test_050_local_settings_permissions_regression_check_lp1755027(self):
"""Assert regression check lp1755027.
@@ -302,39 +347,6 @@ class OpenStackDashboardTests(test_utils.OpenStackBaseTest):
mismatches.append(msg)
return mismatches
def get_base_url(self):
"""Return the base url for http(s) requests.
:returns: URL
:rtype: str
"""
vip = (zaza_model.get_application_config(self.application_name)
.get("vip").get("value"))
if vip:
ip = vip
else:
unit = zaza_model.get_unit_from_name(
zaza_model.get_lead_unit_name(self.application_name))
ip = unit.public_address
logging.debug("Dashboard ip is:{}".format(ip))
scheme = 'http'
if self.use_https:
scheme = 'https'
url = '{}://{}'.format(scheme, ip)
logging.debug("Base URL is: {}".format(url))
return url
def get_horizon_url(self):
"""Return the url for acccessing horizon.
:returns: Horizon URL
:rtype: str
"""
url = '{}/horizon'.format(self.get_base_url())
logging.info("Horizon URL is: {}".format(url))
return url
def test_400_connection(self):
"""Test that dashboard responds to http request.
@@ -450,7 +462,8 @@ class OpenStackDashboardTests(test_utils.OpenStackBaseTest):
logging.info("Testing pause resume")
class OpenStackDashboardPolicydTests(policyd.BasePolicydSpecialization):
class OpenStackDashboardPolicydTests(policyd.BasePolicydSpecialization,
OpenStackDashboardBase):
"""Test the policyd override using the dashboard."""
good = {
@@ -476,6 +489,7 @@ class OpenStackDashboardPolicydTests(policyd.BasePolicydSpecialization):
super(OpenStackDashboardPolicydTests, cls).setUpClass(
application_name="openstack-dashboard")
cls.application_name = "openstack-dashboard"
cls.application = cls.application_name
def get_client_and_attempt_operation(self, ip):
"""Attempt to list users on the openstack-dashboard service.
@@ -500,7 +514,7 @@ class OpenStackDashboardPolicydTests(policyd.BasePolicydSpecialization):
username = 'admin',
password = overcloud_auth['OS_PASSWORD'],
client, response = _login(
unit.public_address, domain, username, password)
self.get_horizon_url(), domain, username, password)
# now attempt to get the domains page
_url = self.url.format(unit.public_address)
result = client.get(_url)

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for testing openstack upgrades."""

View File

@@ -0,0 +1,148 @@
#!/usr/bin/env python3
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define class for OpenStack Upgrade."""
import logging
import unittest
from zaza.openstack.utilities import (
cli as cli_utils,
upgrade_utils as upgrade_utils,
openstack as openstack_utils,
openstack_upgrade as openstack_upgrade,
)
from zaza.openstack.charm_tests.nova.tests import LTSGuestCreateTest
class OpenStackUpgradeVMLaunchBase(object):
"""A base class to peform a simple validation on the cloud.
This wraps an OpenStack upgrade with a VM launch before and after the
upgrade.
This test requires a full OpenStack including at least: keystone, glance,
nova-cloud-controller, nova-compute, neutron-gateway, neutron-api and
neutron-openvswitch.
This class should be used as a base class to the upgrade 'test'.
"""
@classmethod
def setUpClass(cls):
"""Run setup for OpenStack Upgrades."""
print("Running OpenStackUpgradeMixin setUpClass")
super().setUpClass()
cls.lts = LTSGuestCreateTest()
cls.lts.setUpClass()
def test_100_validate_pre_openstack_upgrade_cloud(self):
"""Validate pre openstack upgrade."""
logging.info("Validate pre-openstack-upgrade: Spin up LTS instance")
self.lts.test_launch_small_instance()
def test_500_validate_openstack_upgraded_cloud(self):
"""Validate post openstack upgrade."""
logging.info("Validate post-openstack-upgrade: Spin up LTS instance")
self.lts.test_launch_small_instance()
class WaitForMySQL(unittest.TestCase):
"""Helper test to wait on mysql-innodb-cluster to be fully ready.
In practice this means that there is at least on R/W unit available.
Sometimes, after restarting units in the mysql-innodb-cluster, all the
units are R/O until the cluster picks the R/W unit.
"""
@classmethod
def setUpClass(cls):
"""Set up class."""
print("Running OpenstackUpgradeTests setUpClass")
super().setUpClass()
cli_utils.setup_logging()
def test_100_wait_for_happy_mysql_innodb_cluster(self):
"""Wait for mysql cluster to have at least one R/W node."""
logging.info("Starting wait for an R/W unit.")
openstack_upgrade.block_until_mysql_innodb_cluster_has_rw()
logging.info("Done .. all seems well.")
class OpenStackUpgradeTestsFocalUssuri(OpenStackUpgradeVMLaunchBase):
"""Upgrade OpenStack from distro -> cloud:focal-victoria."""
@classmethod
def setUpClass(cls):
"""Run setup for OpenStack Upgrades."""
print("Running OpenstackUpgradeTests setUpClass")
super().setUpClass()
cli_utils.setup_logging()
def test_200_run_openstack_upgrade(self):
"""Run openstack upgrade, but work out what to do."""
openstack_upgrade.run_upgrade_tests("cloud:focal-victoria")
class OpenStackUpgradeTests(OpenStackUpgradeVMLaunchBase):
"""A Principal Class to encapsulate OpenStack Upgrade Tests.
A generic Test class that can discover which Ubuntu version and OpenStack
version to upgrade from.
TODO: Not used at present. Use the declarative tests directly that choose
the version to upgrade to. The functions that this class depends on need a
bit more work regarding how the determine which version to go to.
"""
@classmethod
def setUpClass(cls):
"""Run setup for OpenStack Upgrades."""
print("Running OpenstackUpgradeTests setUpClass")
super().setUpClass()
cli_utils.setup_logging()
def test_200_run_openstack_upgrade(self):
"""Run openstack upgrade, but work out what to do.
TODO: This is really inefficient at the moment, and doesn't (yet)
determine which ubuntu version to work from. Don't use until we can
make it better.
"""
# TODO: work out the most recent Ubuntu version; we assume this is the
# version that OpenStack is running on.
ubuntu_version = "focal"
logging.info("Getting all principle applications ...")
principle_services = upgrade_utils.get_all_principal_applications()
logging.info(
"Getting OpenStack vesions from principal applications ...")
current_versions = openstack_utils.get_current_os_versions(
principle_services)
logging.info("current versions: %s" % current_versions)
# Find the lowest value openstack release across all services and make
# sure all servcies are upgraded to one release higher than the lowest
from_version = upgrade_utils.get_lowest_openstack_version(
current_versions)
logging.info("from version: %s" % from_version)
to_version = upgrade_utils.determine_next_openstack_release(
from_version)[1]
logging.info("to version: %s" % to_version)
# TODO: need to determine the ubuntu base verion that is being upgraded
target_source = upgrade_utils.determine_new_source(
ubuntu_version, from_version, to_version, single_increment=True)
logging.info("target source: %s" % target_source)
assert target_source is not None
openstack_upgrade.run_upgrade_tests(target_source)

View File

@@ -143,6 +143,13 @@ class ChassisCharmOperationTest(BaseCharmOperationTest):
class OVSOVNMigrationTest(test_utils.BaseCharmTest):
"""OVS to OVN migration tests."""
@classmethod
def setUpClass(cls):
"""Run class setup for OVN migration tests."""
super(OVSOVNMigrationTest, cls).setUpClass()
cls.current_release = openstack_utils.get_os_release(
openstack_utils.get_current_os_release_pair())
def setUp(self):
"""Perform migration steps prior to validation."""
super(OVSOVNMigrationTest, self).setUp()
@@ -410,3 +417,17 @@ class OVSOVNMigrationTest(test_utils.BaseCharmTest):
zaza.model.wait_for_agent_status()
zaza.model.wait_for_application_states(
states=self.target_deploy_status)
# Workaround for our old friend LP: #1852221 which hit us again on
# Groovy. We make the os_release check explicit so that we can
# re-evaluate the need for the workaround at the next release.
if self.current_release == openstack_utils.get_os_release(
'groovy_victoria'):
try:
for application in ('ovn-chassis', 'ovn-dedicated-chassis'):
for unit in zaza.model.get_units(application):
zaza.model.run_on_unit(
unit.entity_id,
'systemctl restart ovs-vswitchd')
except KeyError:
# One of the applications is not in the model, which is fine
pass

View File

@@ -337,8 +337,7 @@ class BasePolicydSpecialization(PolicydTest,
logging.info('Authentication for {} on keystone IP {}'
.format(openrc['OS_USERNAME'], ip))
if self.tls_rid:
openrc['OS_CACERT'] = \
openstack_utils.KEYSTONE_LOCAL_CACERT
openrc['OS_CACERT'] = openstack_utils.get_cacert()
openrc['OS_AUTH_URL'] = (
openrc['OS_AUTH_URL'].replace('http', 'https'))
logging.info('keystone IP {}'.format(ip))

View File

@@ -21,6 +21,7 @@ import logging
import os
import sys
import unittest
import juju
from zaza import model
from zaza.openstack.utilities import (
@@ -55,6 +56,12 @@ class ParallelSeriesUpgradeTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run setup for Series Upgrades."""
# NOTE(ajkavanagh): Set the jujulib Connection frame size to 4GB to
# cope with all the outputs from series upgrade; long term, don't send
# that output back, which will require that the upgrade function in the
# charm doesn't capture the output of the upgrade in the action, but
# instead puts it somewhere that can by "juju scp"ed.
juju.client.connection.Connection.MAX_FRAME_SIZE = 2**32
cli_utils.setup_logging()
cls.from_series = None
cls.to_series = None
@@ -89,19 +96,25 @@ class ParallelSeriesUpgradeTest(unittest.TestCase):
upgrade_function = \
parallel_series_upgrade.parallel_series_upgrade
# allow up to 4 parallel upgrades at a time. This is to limit the
# amount of data/calls that asyncio is handling as it's gets
# unstable if all the applications are done at the same time.
sem = asyncio.Semaphore(4)
for charm_name in apps:
charm = applications[charm_name]['charm']
name = upgrade_utils.extract_charm_name_from_url(charm)
upgrade_config = parallel_series_upgrade.app_config(name)
upgrade_functions.append(
upgrade_function(
charm_name,
**upgrade_config,
from_series=from_series,
to_series=to_series,
completed_machines=completed_machines,
workaround_script=workaround_script,
files=files))
wrap_coroutine_with_sem(
sem,
upgrade_function(
charm_name,
**upgrade_config,
from_series=from_series,
to_series=to_series,
completed_machines=completed_machines,
workaround_script=workaround_script,
files=files)))
asyncio.get_event_loop().run_until_complete(
asyncio.gather(*upgrade_functions))
model.block_until_all_units_idle()
@@ -109,6 +122,18 @@ class ParallelSeriesUpgradeTest(unittest.TestCase):
logging.info("Done!")
async def wrap_coroutine_with_sem(sem, coroutine):
"""Wrap a coroutine with a semaphore to limit concurrency.
:param sem: The semaphore to limit concurrency
:type sem: asyncio.Semaphore
:param coroutine: the corouting to limit concurrency
:type coroutine: types.CoroutineType
"""
async with sem:
await coroutine
class OpenStackParallelSeriesUpgrade(ParallelSeriesUpgradeTest):
"""OpenStack Series Upgrade.

View File

@@ -120,7 +120,23 @@ class BaseCharmTest(unittest.TestCase):
@classmethod
def setUpClass(cls, application_name=None, model_alias=None):
"""Run setup for test class to create common resources."""
"""Run setup for test class to create common resources.
Note: the derived class may not use the application_name; if it's set
to None then this setUpClass() method will attempt to extract the
application name from the charm_config (essentially the test.yaml)
using the key 'charm_name' in the test_config. If that isn't present,
then there will be no application_name set, and this is considered a
generic scenario of a whole model rather than a particular charm under
test.
:param application_name: the name of the applications that the derived
class is testing. If None, then it's a generic test not connected
to any single charm.
:type application_name: Optional[str]
:param model_alias: the alias to use if needed.
:type model_alias: Optional[str]
"""
cls.model_aliases = model.get_juju_model_aliases()
if model_alias:
cls.model_name = cls.model_aliases[model_alias]
@@ -131,7 +147,13 @@ class BaseCharmTest(unittest.TestCase):
if application_name:
cls.application_name = application_name
else:
charm_under_test_name = cls.test_config['charm_name']
try:
charm_under_test_name = cls.test_config['charm_name']
except KeyError:
logging.warning("No application_name and no charm config so "
"not setting the application_name. Likely a "
"scenario test.")
return
deployed_app_names = model.sync_deployed(model_name=cls.model_name)
if charm_under_test_name in deployed_app_names:
# There is an application named like the charm under test.

View File

@@ -26,6 +26,7 @@ import zaza.model
import zaza.openstack.utilities.cert
import zaza.openstack.utilities.openstack
import zaza.openstack.utilities.generic
import zaza.openstack.utilities.exceptions as zaza_exceptions
import zaza.utilities.juju as juju_utils
@@ -73,9 +74,27 @@ def basic_setup_and_unseal(cacert=None):
zaza.model.run_on_unit(unit.name, './hooks/update-status')
async def mojo_or_default_unseal_by_unit():
"""Unseal any units reported as sealed using a cacert.
The mojo cacert is tried first, and if that doesn't exist, then the default
zaza located cacert is used.
"""
try:
await mojo_unseal_by_unit()
except zaza_exceptions.CACERTNotFound:
await unseal_by_unit()
def mojo_unseal_by_unit():
"""Unseal any units reported as sealed using mojo cacert."""
cacert = zaza.openstack.utilities.generic.get_mojo_cacert_path()
unseal_by_unit(cacert)
def unseal_by_unit(cacert=None):
"""Unseal any units reported as sealed using mojo cacert."""
cacert = cacert or get_cacert_file()
vault_creds = vault_utils.get_credentails()
for client in vault_utils.get_clients(cacert=cacert):
if client.hvac_client.is_sealed():
@@ -86,9 +105,27 @@ def mojo_unseal_by_unit():
zaza.model.run_on_unit(unit_name, './hooks/update-status')
async def async_mojo_or_default_unseal_by_unit():
"""Unseal any units reported as sealed using a cacert.
The mojo cacert is tried first, and if that doesn't exist, then the default
zaza located cacert is used.
"""
try:
await async_mojo_unseal_by_unit()
except zaza_exceptions.CACERTNotFound:
await async_unseal_by_unit()
async def async_mojo_unseal_by_unit():
"""Unseal any units reported as sealed using mojo cacert."""
cacert = zaza.openstack.utilities.generic.get_mojo_cacert_path()
await async_unseal_by_unit(cacert)
async def async_unseal_by_unit(cacert=None):
"""Unseal any units reported as sealed using vault cacert."""
cacert = cacert or get_cacert_file()
vault_creds = vault_utils.get_credentails()
for client in vault_utils.get_clients(cacert=cacert):
if client.hvac_client.is_sealed():
@@ -137,7 +174,8 @@ def auto_initialize(cacert=None, validation_application='keystone', wait=True):
zaza.model.wait_for_agent_status()
test_config = lifecycle_utils.get_charm_config(fatal=False)
zaza.model.wait_for_application_states(
states=test_config.get('target_deploy_status', {}))
states=test_config.get('target_deploy_status', {}),
timeout=7200)
if validation_application:
validate_ca(cacertificate, application=validation_application)
@@ -184,9 +222,8 @@ def validate_ca(cacertificate, application="keystone", port=5000):
:returns: None
:rtype: None
"""
zaza.model.block_until_file_has_contents(
zaza.openstack.utilities.openstack.block_until_ca_exists(
application,
zaza.openstack.utilities.openstack.KEYSTONE_REMOTE_CACERT,
cacertificate.decode().strip())
vip = (zaza.model.get_application_config(application)
.get("vip").get("value"))

View File

@@ -154,9 +154,8 @@ class VaultTest(BaseVaultTest):
test_config = lifecycle_utils.get_charm_config()
del test_config['target_deploy_status']['vault']
zaza.model.block_until_file_has_contents(
zaza.openstack.utilities.openstack.block_until_ca_exists(
'keystone',
zaza.openstack.utilities.openstack.KEYSTONE_REMOTE_CACERT,
cacert.decode().strip())
zaza.model.wait_for_application_states(
states=test_config.get('target_deploy_status', {}))

View File

@@ -3,7 +3,7 @@ import json
import logging
import zaza.model as zaza_model
import zaza.utilities.juju as zaza_juju
import zaza.utilities.juju as juju_utils
import zaza.openstack.utilities.openstack as openstack_utils
@@ -225,7 +225,7 @@ def get_pools_from_broker_req(application_or_unit, model_name=None):
"""
# NOTE: we do not pass on a name for the remote_interface_name as that
# varies between the Ceph consuming applications.
relation_data = zaza_juju.get_relation_from_unit(
relation_data = juju_utils.get_relation_from_unit(
'ceph-mon', application_or_unit, None, model_name=model_name)
# NOTE: we probably should consume the Ceph broker code from c-h but c-h is

View File

@@ -16,6 +16,27 @@
This module contains a number of functions for interacting with OpenStack.
"""
import collections
import copy
import datetime
import enum
import io
import itertools
import juju_wait
import logging
import os
import paramiko
import re
import shutil
import six
import subprocess
import sys
import tempfile
import tenacity
import textwrap
import urllib
from .os_versions import (
OPENSTACK_CODENAMES,
SWIFT_CODENAMES,
@@ -42,27 +63,14 @@ from keystoneauth1.identity import (
import zaza.openstack.utilities.cert as cert
import zaza.utilities.deployment_env as deployment_env
import zaza.utilities.juju as juju_utils
import zaza.utilities.maas
from novaclient import client as novaclient_client
from neutronclient.v2_0 import client as neutronclient
from neutronclient.common import exceptions as neutronexceptions
from octaviaclient.api.v2 import octavia as octaviaclient
from swiftclient import client as swiftclient
import datetime
import io
import itertools
import juju_wait
import logging
import os
import paramiko
import re
import six
import subprocess
import sys
import tempfile
import tenacity
import textwrap
import urllib
from juju.errors import JujuError
import zaza
@@ -175,20 +183,83 @@ WORKLOAD_STATUS_EXCEPTIONS = {
'ceilometer and gnocchi')}}
# For vault TLS certificates
CACERT_FILENAME_FORMAT = "{}_juju_ca_cert.crt"
CERT_PROVIDERS = ['vault']
REMOTE_CERT_DIR = "/usr/local/share/ca-certificates"
KEYSTONE_CACERT = "keystone_juju_ca_cert.crt"
KEYSTONE_REMOTE_CACERT = (
"/usr/local/share/ca-certificates/{}".format(KEYSTONE_CACERT))
KEYSTONE_LOCAL_CACERT = ("tests/{}".format(KEYSTONE_CACERT))
async def async_block_until_ca_exists(application_name, ca_cert,
model_name=None, timeout=2700):
"""Block until a CA cert is on all units of application_name.
:param application_name: Name of application to check
:type application_name: str
:param ca_cert: The certificate content.
:type ca_cert: str
:param model_name: Name of model to query.
:type model_name: str
:param timeout: How long in seconds to wait
:type timeout: int
"""
async def _check_ca_present(model, ca_files):
units = model.applications[application_name].units
for ca_file in ca_files:
for unit in units:
try:
output = await unit.run('cat {}'.format(ca_file))
contents = output.data.get('results').get('Stdout', '')
if ca_cert not in contents:
break
# libjuju throws a generic error for connection failure. So we
# cannot differentiate between a connectivity issue and a
# target file not existing error. For now just assume the
# latter.
except JujuError:
break
else:
# The CA was found in `ca_file` on all units.
return True
else:
return False
ca_files = await _async_get_remote_ca_cert_file_candidates(
application_name,
model_name=model_name)
async with zaza.model.run_in_model(model_name) as model:
await zaza.model.async_block_until(
lambda: _check_ca_present(model, ca_files), timeout=timeout)
block_until_ca_exists = zaza.model.sync_wrapper(async_block_until_ca_exists)
def get_cacert_absolute_path(filename):
"""Build string containing location of the CA Certificate file.
:param filename: Expected filename for CA Certificate file.
:type filename: str
:returns: Absolute path to file containing CA Certificate
:rtype: str
"""
return os.path.join(
deployment_env.get_tmpdir(), filename)
def get_cacert():
"""Return path to CA Certificate bundle for verification during test.
:returns: Path to CA Certificate bundle or None.
:rtype: Optional[str]
:rtype: Union[str, None]
"""
if os.path.exists(KEYSTONE_LOCAL_CACERT):
return KEYSTONE_LOCAL_CACERT
for _provider in CERT_PROVIDERS:
_cert = get_cacert_absolute_path(
CACERT_FILENAME_FORMAT.format(_provider))
if os.path.exists(_cert):
return _cert
_keystone_local_cacert = get_cacert_absolute_path(KEYSTONE_CACERT)
if os.path.exists(_keystone_local_cacert):
return _keystone_local_cacert
# OpenStack Client helpers
@@ -719,6 +790,211 @@ def add_interface_to_netplan(server_name, mac_address):
model.run_on_unit(unit_name, "sudo netplan apply")
class OpenStackNetworkingTopology(enum.Enum):
"""OpenStack Charms Network Topologies."""
ML2_OVS = 'ML2+OVS'
ML2_OVS_DVR = 'ML2+OVS+DVR'
ML2_OVS_DVR_SNAT = 'ML2+OVS+DVR, no dedicated GWs'
ML2_OVN = 'ML2+OVN'
CharmedOpenStackNetworkingData = collections.namedtuple(
'CharmedOpenStackNetworkingData',
[
'topology',
'application_names',
'unit_machine_ids',
'port_config_key',
'other_config',
])
def get_charm_networking_data(limit_gws=None):
"""Inspect Juju model, determine networking topology and return data.
:param limit_gws: Limit the number of gateways that get a port attached
:type limit_gws: Optional[int]
:rtype: CharmedOpenStackNetworkingData[
OpenStackNetworkingTopology,
List[str],
Iterator[str],
str,
Dict[str,str]]
:returns: Named Tuple with networking data, example:
CharmedOpenStackNetworkingData(
OpenStackNetworkingTopology.ML2_OVN,
['ovn-chassis', 'ovn-dedicated-chassis'],
['machine-id-1', 'machine-id-2'], # generator object
'bridge-interface-mappings',
{'ovn-bridge-mappings': 'physnet1:br-ex'})
:raises: RuntimeError
"""
# Initialize defaults, these will be amended to fit the reality of the
# model in the checks below.
topology = OpenStackNetworkingTopology.ML2_OVS
other_config = {}
port_config_key = (
'data-port' if not deprecated_external_networking() else 'ext-port')
unit_machine_ids = []
application_names = []
if dvr_enabled():
if ngw_present():
application_names = ['neutron-gateway', 'neutron-openvswitch']
topology = OpenStackNetworkingTopology.ML2_OVS_DVR
else:
application_names = ['neutron-openvswitch']
topology = OpenStackNetworkingTopology.ML2_OVS_DVR_SNAT
unit_machine_ids = itertools.islice(
itertools.chain(
get_ovs_uuids(),
get_gateway_uuids()),
limit_gws)
elif ngw_present():
unit_machine_ids = itertools.islice(
get_gateway_uuids(), limit_gws)
application_names = ['neutron-gateway']
elif ovn_present():
topology = OpenStackNetworkingTopology.ML2_OVN
unit_machine_ids = itertools.islice(get_ovn_uuids(), limit_gws)
application_names = ['ovn-chassis']
try:
ovn_dc_name = 'ovn-dedicated-chassis'
model.get_application(ovn_dc_name)
application_names.append(ovn_dc_name)
except KeyError:
# ovn-dedicated-chassis not in deployment
pass
port_config_key = 'bridge-interface-mappings'
other_config.update({'ovn-bridge-mappings': 'physnet1:br-ex'})
else:
raise RuntimeError('Unable to determine charm network topology.')
return CharmedOpenStackNetworkingData(
topology,
application_names,
unit_machine_ids,
port_config_key,
other_config)
def create_additional_port_for_machines(novaclient, neutronclient, net_id,
unit_machine_ids,
add_dataport_to_netplan=False):
"""Create additional port for machines for use with external networking.
:param novaclient: Undercloud Authenticated novaclient.
:type novaclient: novaclient.Client object
:param neutronclient: Undercloud Authenticated neutronclient.
:type neutronclient: neutronclient.Client object
:param net_id: Network ID to create ports on.
:type net_id: string
:param unit_machine_ids: Juju provider specific machine IDs for which we
should add ports on.
:type unit_machine_ids: Iterator[str]
:param add_dataport_to_netplan: Whether the newly created port should be
added to instance system configuration so
that it is brought up on instance reboot.
:type add_dataport_to_netplan: Optional[bool]
:returns: List of MAC addresses for created ports.
:rtype: List[str]
:raises: RuntimeError
"""
eligible_machines = 0
for uuid in unit_machine_ids:
eligible_machines += 1
server = novaclient.servers.get(uuid)
ext_port_name = "{}_ext-port".format(server.name)
for port in neutronclient.list_ports(device_id=server.id)['ports']:
if port['name'] == ext_port_name:
logging.warning(
'Instance {} already has additional port, skipping.'
.format(server.id))
break
else:
logging.info('Attaching additional port to instance ("{}"), '
'connected to net id: {}'
.format(uuid, net_id))
body_value = {
"port": {
"admin_state_up": True,
"name": ext_port_name,
"network_id": net_id,
"port_security_enabled": False,
}
}
port = neutronclient.create_port(body=body_value)
server.interface_attach(port_id=port['port']['id'],
net_id=None, fixed_ip=None)
if add_dataport_to_netplan:
mac_address = get_mac_from_port(port, neutronclient)
add_interface_to_netplan(server.name,
mac_address=mac_address)
if not eligible_machines:
# NOTE: unit_machine_ids may be an iterator so testing it for contents
# or length prior to iterating over it is futile.
raise RuntimeError('Unable to determine UUIDs for machines to attach '
'external networking to.')
# Retrieve the just created ports from Neutron so that we can provide our
# caller with their MAC addresses.
return [
port['mac_address']
for port in neutronclient.list_ports(network_id=net_id)['ports']
if 'ext-port' in port['name']
]
def configure_networking_charms(networking_data, macs, use_juju_wait=True):
"""Configure external networking for networking charms.
:param networking_data: Data on networking charm topology.
:type networking_data: CharmedOpenStackNetworkingData
:param macs: MAC addresses of ports for use with external networking.
:type macs: Iterator[str]
:param use_juju_wait: Whether to use juju wait to wait for the model to
settle once the gateway has been configured. Default is True
:type use_juju_wait: Optional[bool]
"""
br_mac_fmt = 'br-ex:{}' if not deprecated_external_networking() else '{}'
br_mac = [
br_mac_fmt.format(mac)
for mac in macs
]
config = copy.deepcopy(networking_data.other_config)
config.update({networking_data.port_config_key: ' '.join(sorted(br_mac))})
for application_name in networking_data.application_names:
logging.info('Setting {} on {}'.format(
config, application_name))
current_data_port = get_application_config_option(
application_name,
networking_data.port_config_key)
if current_data_port == config[networking_data.port_config_key]:
logging.info('Config already set to value')
return
model.set_application_config(
application_name,
configuration=config)
# NOTE(fnordahl): We are stuck with juju_wait until we figure out how
# to deal with all the non ['active', 'idle', 'Unit is ready.']
# workload/agent states and msgs that our mojo specs are exposed to.
if use_juju_wait:
juju_wait.wait(wait_for_workload=True)
else:
zaza.model.wait_for_agent_status()
# TODO: shouldn't access get_charm_config() here as it relies on
# ./tests/tests.yaml existing by default (regardless of the
# fatal=False) ... it's not great design.
test_config = zaza.charm_lifecycle.utils.get_charm_config(
fatal=False)
zaza.model.wait_for_application_states(
states=test_config.get('target_deploy_status', {}))
def configure_gateway_ext_port(novaclient, neutronclient, net_id=None,
add_dataport_to_netplan=False,
limit_gws=None,
@@ -737,123 +1013,46 @@ def configure_gateway_ext_port(novaclient, neutronclient, net_id=None,
settle once the gateway has been configured. Default is True
:type use_juju_wait: boolean
"""
deprecated_extnet_mode = deprecated_external_networking()
port_config_key = 'data-port'
if deprecated_extnet_mode:
port_config_key = 'ext-port'
config = {}
if dvr_enabled():
uuids = itertools.islice(itertools.chain(get_ovs_uuids(),
get_gateway_uuids()),
limit_gws)
networking_data = get_charm_networking_data(limit_gws=limit_gws)
if networking_data.topology in (
OpenStackNetworkingTopology.ML2_OVS_DVR,
OpenStackNetworkingTopology.ML2_OVS_DVR_SNAT):
# If dvr, do not attempt to persist nic in netplan
# https://github.com/openstack-charmers/zaza-openstack-tests/issues/78
add_dataport_to_netplan = False
application_names = ['neutron-openvswitch']
try:
ngw = 'neutron-gateway'
model.get_application(ngw)
application_names.append(ngw)
except KeyError:
# neutron-gateway not in deployment
pass
elif ngw_present():
uuids = itertools.islice(get_gateway_uuids(), limit_gws)
application_names = ['neutron-gateway']
elif ovn_present():
uuids = itertools.islice(get_ovn_uuids(), limit_gws)
application_names = ['ovn-chassis']
try:
ovn_dc_name = 'ovn-dedicated-chassis'
model.get_application(ovn_dc_name)
application_names.append(ovn_dc_name)
except KeyError:
# ovn-dedicated-chassis not in deployment
pass
port_config_key = 'bridge-interface-mappings'
config.update({'ovn-bridge-mappings': 'physnet1:br-ex'})
add_dataport_to_netplan = True
else:
raise RuntimeError('Unable to determine charm network topology.')
if not net_id:
net_id = get_admin_net(neutronclient)['id']
ports_created = 0
for uuid in uuids:
server = novaclient.servers.get(uuid)
ext_port_name = "{}_ext-port".format(server.name)
for port in neutronclient.list_ports(device_id=server.id)['ports']:
if port['name'] == ext_port_name:
logging.warning(
'Neutron Gateway already has additional port')
break
else:
logging.info('Attaching additional port to instance ("{}"), '
'connected to net id: {}'
.format(uuid, net_id))
body_value = {
"port": {
"admin_state_up": True,
"name": ext_port_name,
"network_id": net_id,
"port_security_enabled": False,
}
}
port = neutronclient.create_port(body=body_value)
ports_created += 1
server.interface_attach(port_id=port['port']['id'],
net_id=None, fixed_ip=None)
if add_dataport_to_netplan:
mac_address = get_mac_from_port(port, neutronclient)
add_interface_to_netplan(server.name,
mac_address=mac_address)
if not ports_created:
# NOTE: uuids is an iterator so testing it for contents or length prior
# to iterating over it is futile.
raise RuntimeError('Unable to determine UUIDs for machines to attach '
'external networking to.')
macs = create_additional_port_for_machines(
novaclient, neutronclient, net_id, networking_data.unit_machine_ids,
add_dataport_to_netplan)
ext_br_macs = []
for port in neutronclient.list_ports(network_id=net_id)['ports']:
if 'ext-port' in port['name']:
if deprecated_extnet_mode:
ext_br_macs.append(port['mac_address'])
else:
ext_br_macs.append('br-ex:{}'.format(port['mac_address']))
ext_br_macs.sort()
ext_br_macs_str = ' '.join(ext_br_macs)
if macs:
configure_networking_charms(
networking_data, macs, use_juju_wait=use_juju_wait)
if ext_br_macs:
config.update({port_config_key: ext_br_macs_str})
for application_name in application_names:
logging.info('Setting {} on {}'.format(
config, application_name))
current_data_port = get_application_config_option(application_name,
port_config_key)
if current_data_port == ext_br_macs_str:
logging.info('Config already set to value')
return
model.set_application_config(
application_name,
configuration=config)
# NOTE(fnordahl): We are stuck with juju_wait until we figure out how
# to deal with all the non ['active', 'idle', 'Unit is ready.']
# workload/agent states and msgs that our mojo specs are exposed to.
if use_juju_wait:
juju_wait.wait(wait_for_workload=True)
else:
zaza.model.wait_for_agent_status()
# TODO: shouldn't access get_charm_config() here as it relies on
# ./tests/tests.yaml existing by default (regardless of the
# fatal=False) ... it's not great design.
test_config = zaza.charm_lifecycle.utils.get_charm_config(
fatal=False)
zaza.model.wait_for_application_states(
states=test_config.get('target_deploy_status', {}))
def configure_charmed_openstack_on_maas(network_config, limit_gws=None):
"""Configure networking charms for charm-based OVS config on MAAS provider.
:param network_config: Network configuration as provided in environment.
:type network_config: Dict[str]
:param limit_gws: Limit the number of gateways that get a port attached
:type limit_gws: Optional[int]
"""
networking_data = get_charm_networking_data(limit_gws=limit_gws)
macs = [
mim.mac
for mim in zaza.utilities.maas.get_macs_from_cidr(
zaza.utilities.maas.get_maas_client_from_juju_cloud_data(
zaza.model.get_cloud_data()),
network_config['external_net_cidr'],
link_mode=zaza.utilities.maas.LinkMode.LINK_UP)
]
if macs:
configure_networking_charms(
networking_data, macs, use_juju_wait=False)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
@@ -1554,6 +1753,7 @@ def get_current_os_versions(deployed_applications, model_name=None):
for application in UPGRADE_SERVICES:
if application['name'] not in deployed_applications:
continue
logging.info("looking at application: {}".format(application))
version = generic_utils.get_pkg_version(application['name'],
application['type']['pkg'],
@@ -1602,15 +1802,19 @@ def get_current_os_release_pair(application='keystone'):
return '{}_{}'.format(series, os_version)
def get_os_release(release_pair=None):
def get_os_release(release_pair=None, application='keystone'):
"""Return index of release in OPENSTACK_RELEASES_PAIRS.
:param release_pair: OpenStack release pair eg 'focal_ussuri'
:type release_pair: string
:param application: Name of application to derive release pair from.
:type application: string
:returns: Index of the release
:rtype: int
:raises: exceptions.ReleasePairNotFound
"""
if release_pair is None:
release_pair = get_current_os_release_pair()
release_pair = get_current_os_release_pair(application=application)
try:
index = OPENSTACK_RELEASES_PAIRS.index(release_pair)
except ValueError:
@@ -1811,29 +2015,82 @@ def get_overcloud_auth(address=None, model_name=None):
'OS_PROJECT_DOMAIN_NAME': 'admin_domain',
'API_VERSION': 3,
}
if tls_rid:
unit = model.get_first_unit_name('keystone', model_name=model_name)
# ensure that the path to put the local cacert in actually exists. The
# assumption that 'tests/' exists for, say, mojo is false.
# Needed due to:
# commit: 537473ad3addeaa3d1e4e2d0fd556aeaa4018eb2
_dir = os.path.dirname(KEYSTONE_LOCAL_CACERT)
if not os.path.exists(_dir):
os.makedirs(_dir)
model.scp_from_unit(
unit,
KEYSTONE_REMOTE_CACERT,
KEYSTONE_LOCAL_CACERT)
if os.path.exists(KEYSTONE_LOCAL_CACERT):
os.chmod(KEYSTONE_LOCAL_CACERT, 0o644)
auth_settings['OS_CACERT'] = KEYSTONE_LOCAL_CACERT
local_ca_cert = get_remote_ca_cert_file('keystone', model_name=model_name)
if local_ca_cert:
auth_settings['OS_CACERT'] = local_ca_cert
return auth_settings
async def _async_get_remote_ca_cert_file_candidates(application,
model_name=None):
"""Return a list of possible remote CA file names.
:param application: Name of application to examine.
:type application: str
:param model_name: Name of model to query.
:type model_name: str
:returns: List of paths to possible ca files.
:rtype: List[str]
"""
cert_files = []
for _provider in CERT_PROVIDERS:
tls_rid = await model.async_get_relation_id(
application,
_provider,
model_name=model_name,
remote_interface_name='certificates')
if tls_rid:
cert_files.append(
REMOTE_CERT_DIR + '/' + CACERT_FILENAME_FORMAT.format(
_provider))
cert_files.append(KEYSTONE_REMOTE_CACERT)
return cert_files
_get_remote_ca_cert_file_candidates = zaza.model.sync_wrapper(
_async_get_remote_ca_cert_file_candidates)
def get_remote_ca_cert_file(application, model_name=None):
"""Collect CA certificate from application.
:param application: Name of application to collect file from.
:type application: str
:param model_name: Name of model to query.
:type model_name: str
:returns: Path to cafile
:rtype: str
"""
unit = model.get_first_unit_name(application, model_name=model_name)
local_cert_file = None
cert_files = _get_remote_ca_cert_file_candidates(
application,
model_name=model_name)
for cert_file in cert_files:
_local_cert_file = get_cacert_absolute_path(
os.path.basename(cert_file))
with tempfile.NamedTemporaryFile(mode="w", delete=False) as _tmp_ca:
try:
model.scp_from_unit(
unit,
cert_file,
_tmp_ca.name)
except JujuError:
continue
# ensure that the path to put the local cacert in actually exists.
# The assumption that 'tests/' exists for, say, mojo is false.
# Needed due to:
# commit: 537473ad3addeaa3d1e4e2d0fd556aeaa4018eb2
_dir = os.path.dirname(_local_cert_file)
if not os.path.exists(_dir):
os.makedirs(_dir)
shutil.move(_tmp_ca.name, _local_cert_file)
os.chmod(_local_cert_file, 0o644)
local_cert_file = _local_cert_file
break
return local_cert_file
def get_urllib_opener():
"""Create a urllib opener taking into account proxy settings.

View File

@@ -95,8 +95,8 @@ async def async_action_unit_upgrade(units, model_name=None):
action_unit_upgrade = sync_wrapper(async_action_unit_upgrade)
def action_upgrade_group(applications, model_name=None):
"""Upgrade units using action managed upgrades.
def action_upgrade_apps(applications, model_name=None):
"""Upgrade units in the applications using action managed upgrades.
Upgrade all units of the given applications using action managed upgrades.
This involves the following process:
@@ -142,6 +142,45 @@ def action_upgrade_group(applications, model_name=None):
done.extend(target)
# Ensure that mysql-innodb-cluster has at least one R/W group (it can get
# into a state where all are R/O whilst it is sorting itself out after an
# openstack_upgrade
if "mysql-innodb-cluster" in applications:
block_until_mysql_innodb_cluster_has_rw(model_name)
# Now we need to wait for the model to go back to idle.
zaza.model.block_until_all_units_idle(model_name)
async def async_block_until_mysql_innodb_cluster_has_rw(model=None,
timeout=None):
"""Block until the mysql-innodb-cluster is in a healthy state.
Curiously, after a series of pauses and restarts (e.g. during an upgrade)
the mysql-innodb-cluster charms may not yet have agreed which one is the
R/W node; i.e. they are all R/O. Anyway, eventually they sort it out and
one jumps to the front and says "it's me!". This is detected, externally,
by the status line including R/W in the output.
This function blocks until that happens so that no charm attempts to have a
chat with the mysql server before it has settled, thus breaking the whole
test.
"""
async def async_check_workload_messages_for_rw(model=None):
"""Return True if a least one work message contains R/W."""
status = await zaza.model.async_get_status()
app_status = status.applications.get("mysql-innodb-cluster")
units_data = app_status.units.values()
workload_statuses = [d.workload_status.info for d in units_data]
return any("R/W" in s for s in workload_statuses)
await zaza.model.async_block_until(async_check_workload_messages_for_rw,
timeout=timeout)
block_until_mysql_innodb_cluster_has_rw = sync_wrapper(
async_block_until_mysql_innodb_cluster_has_rw)
def set_upgrade_application_config(applications, new_source,
action_managed=True, model_name=None):
@@ -150,7 +189,7 @@ def set_upgrade_application_config(applications, new_source,
Set the charm config for upgrade.
:param applications: List of application names.
:type applications: []
:type applications: List[str]
:param new_source: New package origin.
:type new_source: str
:param action_managed: Whether to set action-managed-upgrade config option.
@@ -180,8 +219,8 @@ def set_upgrade_application_config(applications, new_source,
def is_action_upgradable(app, model_name=None):
"""Can application be upgraded using action managed upgrade method.
:param new_source: New package origin.
:type new_source: str
:param app: The application to check
:type app: str
:param model_name: Name of model to query.
:type model_name: str
:returns: Whether app be upgraded using action managed upgrade method.
@@ -196,66 +235,95 @@ def is_action_upgradable(app, model_name=None):
return supported
def run_action_upgrade(group, new_source, model_name=None):
def is_already_upgraded(app, new_src, model_name=None):
"""Return True if the app has already been upgraded.
:param app: The application to check
:type app: str
:param new_src: the new source (distro, cloud:x-y, etc.)
:type new_src: str
:param model_name: Name of model to query.
:type model_name: str
:returns: Whether app be upgraded using action managed upgrade method.
:rtype: bool
"""
config = zaza.model.get_application_config(app, model_name=model_name)
try:
src = config['openstack-origin']['value']
key_was = 'openstack-origin'
except KeyError:
src = config['source']['value']
key_was = 'source'
logging.info("origin for {} is {}={}".format(app, key_was, src))
return src == new_src
def run_action_upgrades(apps, new_source, model_name=None):
"""Upgrade payload of all applications in group using action upgrades.
:param group: List of applications to upgrade.
:type group
:param apps: List of applications to upgrade.
:type apps: List[str]
:param new_source: New package origin.
:type new_source: str
:param model_name: Name of model to query.
:type model_name: str
"""
set_upgrade_application_config(group, new_source, model_name=model_name)
action_upgrade_group(group, model_name=model_name)
set_upgrade_application_config(apps, new_source, model_name=model_name)
action_upgrade_apps(apps, model_name=model_name)
def run_all_in_one_upgrade(group, new_source, model_name=None):
def run_all_in_one_upgrades(apps, new_source, model_name=None):
"""Upgrade payload of all applications in group using all-in-one method.
:param group: List of applications to upgrade.
:type group: []
:param apps: List of applications to upgrade.
:type apps: List[str]
:source: New package origin.
:type new_source: str
:param model_name: Name of model to query.
:type model_name: str
"""
set_upgrade_application_config(
group,
apps,
new_source,
model_name=model_name,
action_managed=False)
zaza.model.block_until_all_units_idle()
def run_upgrade(group, new_source, model_name=None):
def run_upgrade_on_apps(apps, new_source, model_name=None):
"""Upgrade payload of all applications in group.
Upgrade apps using action managed upgrades where possible and fallback to
all_in_one method.
:param group: List of applications to upgrade.
:type group: []
:param apps: List of applications to upgrade.
:type apps: []
:param new_source: New package origin.
:type new_source: str
:param model_name: Name of model to query.
:type model_name: str
"""
action_upgrade = []
all_in_one_upgrade = []
for app in group:
action_upgrades = []
all_in_one_upgrades = []
for app in apps:
if is_already_upgraded(app, new_source, model_name=model_name):
logging.info("Application '%s' is already upgraded. Skipping.",
app)
continue
if is_action_upgradable(app, model_name=model_name):
action_upgrade.append(app)
action_upgrades.append(app)
else:
all_in_one_upgrade.append(app)
run_all_in_one_upgrade(
all_in_one_upgrade,
new_source,
model_name=model_name)
run_action_upgrade(
action_upgrade,
new_source,
model_name=model_name)
all_in_one_upgrades.append(app)
if all_in_one_upgrades:
run_all_in_one_upgrades(
all_in_one_upgrades,
new_source,
model_name=model_name)
if action_upgrades:
run_action_upgrades(
action_upgrades,
new_source,
model_name=model_name)
def run_upgrade_tests(new_source, model_name=None):
@@ -270,8 +338,6 @@ def run_upgrade_tests(new_source, model_name=None):
:type model_name: str
"""
groups = get_upgrade_groups(model_name=model_name)
run_upgrade(groups['Core Identity'], new_source, model_name=model_name)
run_upgrade(groups['Storage'], new_source, model_name=model_name)
run_upgrade(groups['Control Plane'], new_source, model_name=model_name)
run_upgrade(groups['Compute'], new_source, model_name=model_name)
run_upgrade(groups['sweep_up'], new_source, model_name=model_name)
for name, apps in groups:
logging.info("Performing upgrade of %s", name)
run_upgrade_on_apps(apps, new_source, model_name=model_name)

View File

@@ -58,7 +58,9 @@ def app_config(charm_name):
}
exceptions = {
'rabbitmq-server': {
'origin': 'source',
# NOTE: AJK disable config-changed on rabbitmq-server due to bug:
# #1896520
'origin': None,
'pause_non_leader_subordinate': False,
'post_application_upgrade_functions': [
('zaza.openstack.charm_tests.rabbitmq_server.utils.'
@@ -94,7 +96,7 @@ def app_config(charm_name):
'pause_non_leader_subordinate': True,
'post_upgrade_functions': [
('zaza.openstack.charm_tests.vault.setup.'
'async_mojo_unseal_by_unit')]
'async_mojo_or_default_unseal_by_unit')]
},
'mongodb': {
'origin': None,
@@ -191,49 +193,64 @@ async def parallel_series_upgrade(
status = (await model.async_get_status()).applications[application]
logging.info(
"Configuring leader / non leaders for {}".format(application))
leader, non_leaders = get_leader_and_non_leaders(status)
for leader_name, leader_unit in leader.items():
leaders, non_leaders = get_leader_and_non_leaders(status)
for leader_unit in leaders.values():
leader_machine = leader_unit["machine"]
leader = leader_name
machines = [
unit["machine"] for name, unit
in non_leaders.items()
if unit['machine'] not in completed_machines]
machines = [unit["machine"] for name, unit in non_leaders.items()
if unit['machine'] not in completed_machines]
await maybe_pause_things(
status,
non_leaders,
pause_non_leader_subordinate,
pause_non_leader_primary)
await series_upgrade_utils.async_set_series(
application, to_series=to_series)
app_idle = [
# wait for the entire application set to be idle before starting upgrades
await asyncio.gather(*[
model.async_wait_for_unit_idle(unit, include_subordinates=True)
for unit in status["units"]
]
await asyncio.gather(*app_idle)
for unit in status["units"]])
await prepare_series_upgrade(leader_machine, to_series=to_series)
prepare_group = [
prepare_series_upgrade(machine, to_series=to_series)
for machine in machines]
await asyncio.gather(*prepare_group)
await asyncio.gather(*[
wait_for_idle_then_prepare_series_upgrade(
machine, to_series=to_series)
for machine in machines])
if leader_machine not in completed_machines:
machines.append(leader_machine)
upgrade_group = [
await asyncio.gather(*[
series_upgrade_machine(
machine,
origin=origin,
application=application,
files=files, workaround_script=workaround_script,
post_upgrade_functions=post_upgrade_functions)
for machine in machines
]
await asyncio.gather(*upgrade_group)
for machine in machines])
completed_machines.extend(machines)
await series_upgrade_utils.async_set_series(
application, to_series=to_series)
await run_post_application_upgrade_functions(
post_application_upgrade_functions)
async def wait_for_idle_then_prepare_series_upgrade(
machine, to_series, model_name=None):
"""Wait for the units to idle the do prepare_series_upgrade.
We need to be sure that all the units are idle prior to actually calling
prepare_series_upgrade() as otherwise the call will fail. It has to be
checked because when the leader is paused it may kick off relation hooks in
the other units in an HA group.
:param machine: the machine that is going to be prepared
:type machine: str
:param to_series: The series to which to upgrade
:type to_series: str
:param model_name: Name of model to query.
:type model_name: str
"""
await model.async_block_until_units_on_machine_are_idle(
machine, model_name=model_name)
await prepare_series_upgrade(machine, to_series=to_series)
async def serial_series_upgrade(
application,
from_series='xenial',
@@ -307,8 +324,10 @@ async def serial_series_upgrade(
non_leaders,
pause_non_leader_subordinate,
pause_non_leader_primary)
logging.info("Finishing pausing application: {}".format(application))
await series_upgrade_utils.async_set_series(
application, to_series=to_series)
logging.info("Finished set series for application: {}".format(application))
if not follower_first and leader_machine not in completed_machines:
await model.async_wait_for_unit_idle(leader, include_subordinates=True)
await prepare_series_upgrade(leader_machine, to_series=to_series)
@@ -321,6 +340,8 @@ async def serial_series_upgrade(
files=files, workaround_script=workaround_script,
post_upgrade_functions=post_upgrade_functions)
completed_machines.append(leader_machine)
logging.info("Finished upgrading of leader for application: {}"
.format(application))
# for machine in machines:
for unit_name, unit in non_leaders.items():
@@ -339,6 +360,8 @@ async def serial_series_upgrade(
files=files, workaround_script=workaround_script,
post_upgrade_functions=post_upgrade_functions)
completed_machines.append(machine)
logging.info("Finished upgrading non leaders for application: {}"
.format(application))
if follower_first and leader_machine not in completed_machines:
await model.async_wait_for_unit_idle(leader, include_subordinates=True)
@@ -354,6 +377,7 @@ async def serial_series_upgrade(
completed_machines.append(leader_machine)
await run_post_application_upgrade_functions(
post_application_upgrade_functions)
logging.info("Done series upgrade for: {}".format(application))
async def series_upgrade_machine(
@@ -381,17 +405,16 @@ async def series_upgrade_machine(
:returns: None
:rtype: None
"""
logging.info(
"About to series-upgrade ({})".format(machine))
logging.info("About to series-upgrade ({})".format(machine))
await run_pre_upgrade_functions(machine, pre_upgrade_functions)
await add_confdef_file(machine)
await async_dist_upgrade(machine)
await async_do_release_upgrade(machine)
await remove_confdef_file(machine)
await reboot(machine)
await series_upgrade_utils.async_complete_series_upgrade(machine)
if origin:
await os_utils.async_set_origin(application, origin)
await series_upgrade_utils.async_complete_series_upgrade(machine)
await run_post_upgrade_functions(post_upgrade_functions)
@@ -484,8 +507,7 @@ async def maybe_pause_things(
:returns: Nothing
:trype: None
"""
subordinate_pauses = []
leader_pauses = []
unit_pauses = []
for unit in units:
if pause_non_leader_subordinate:
if status["units"][unit].get("subordinates"):
@@ -495,15 +517,19 @@ async def maybe_pause_things(
logging.info("Skipping pausing {} - blacklisted"
.format(subordinate))
else:
logging.info("Pausing {}".format(subordinate))
subordinate_pauses.append(model.async_run_action(
subordinate, "pause", action_params={}))
unit_pauses.append(
_pause_helper("subordinate", subordinate))
if pause_non_leader_primary:
logging.info("Pausing {}".format(unit))
leader_pauses.append(
model.async_run_action(unit, "pause", action_params={}))
await asyncio.gather(*leader_pauses)
await asyncio.gather(*subordinate_pauses)
unit_pauses.append(_pause_helper("leader", unit))
if unit_pauses:
await asyncio.gather(*unit_pauses)
async def _pause_helper(_type, unit):
"""Pause helper to ensure that the log happens nearer to the action."""
logging.info("Pausing ({}) {}".format(_type, unit))
await model.async_run_action(unit, "pause", action_params={})
logging.info("Finished Pausing ({}) {}".format(_type, unit))
def get_leader_and_non_leaders(status):
@@ -541,14 +567,14 @@ async def prepare_series_upgrade(machine, to_series):
NOTE: This is a new feature in juju behind a feature flag and not yet in
libjuju.
export JUJU_DEV_FEATURE_FLAGS=upgrade-series
:param machine_num: Machine number
:type machine_num: str
:param machine: Machine number
:type machine: str
:param to_series: The series to which to upgrade
:type to_series: str
:returns: None
:rtype: None
"""
logging.info("Preparing series upgrade for: {}".format(machine))
logging.info("Preparing series upgrade for: %s", machine)
await series_upgrade_utils.async_prepare_series_upgrade(
machine, to_series=to_series)
@@ -564,9 +590,8 @@ async def reboot(machine):
try:
await model.async_run_on_machine(machine, 'sudo init 6 & exit')
# await run_on_machine(unit, "sudo reboot && exit")
except subprocess.CalledProcessError as e:
logging.warn("Error doing reboot: {}".format(e))
pass
except subprocess.CalledProcessError as error:
logging.warning("Error doing reboot: %s", error)
async def async_dist_upgrade(machine):
@@ -577,16 +602,31 @@ async def async_dist_upgrade(machine):
:returns: None
:rtype: None
"""
logging.info('Updating package db ' + machine)
logging.info('Updating package db %s', machine)
update_cmd = 'sudo apt-get update'
await model.async_run_on_machine(machine, update_cmd)
logging.info('Updating existing packages ' + machine)
logging.info('Updating existing packages %s', machine)
dist_upgrade_cmd = (
"""yes | sudo DEBIAN_FRONTEND=noninteractive apt-get --assume-yes """
"""-o "Dpkg::Options::=--force-confdef" """
"""-o "Dpkg::Options::=--force-confold" dist-upgrade""")
await model.async_run_on_machine(machine, dist_upgrade_cmd)
rdict = await model.async_run_on_machine(
machine,
"cat /var/run/reboot-required || true")
if "Stdout" in rdict and "restart" in rdict["Stdout"].lower():
logging.info("dist-upgrade required reboot machine: %s", machine)
await reboot(machine)
logging.info("Waiting for machine to come back afer reboot: %s",
machine)
await model.async_block_until_file_missing_on_machine(
machine, "/var/run/reboot-required")
logging.info("Waiting for machine idleness on %s", machine)
await asyncio.sleep(5.0)
await model.async_block_until_units_on_machine_are_idle(machine)
# TODO: change this to wait on units on the machine
# await model.async_block_until_all_units_idle()
async def async_do_release_upgrade(machine):
@@ -597,7 +637,7 @@ async def async_do_release_upgrade(machine):
:returns: None
:rtype: None
"""
logging.info('Upgrading ' + machine)
logging.info('Upgrading %s', machine)
do_release_upgrade_cmd = (
'yes | sudo DEBIAN_FRONTEND=noninteractive '
'do-release-upgrade -f DistUpgradeViewNonInteractive')

View File

@@ -884,7 +884,8 @@ def dist_upgrade(unit_name):
"""-o "Dpkg::Options::=--force-confdef" """
"""-o "Dpkg::Options::=--force-confold" dist-upgrade""")
model.run_on_unit(unit_name, dist_upgrade_cmd)
rdict = model.run_on_unit(unit_name, "cat /var/run/reboot-required")
rdict = model.run_on_unit(unit_name,
"cat /var/run/reboot-required || true")
if "Stdout" in rdict and "restart" in rdict["Stdout"].lower():
logging.info("dist-upgrade required reboot {}".format(unit_name))
os_utils.reboot(unit_name)
@@ -919,8 +920,8 @@ async def async_dist_upgrade(unit_name):
"""-o "Dpkg::Options::=--force-confdef" """
"""-o "Dpkg::Options::=--force-confold" dist-upgrade""")
await model.async_run_on_unit(unit_name, dist_upgrade_cmd)
rdict = await model.async_run_on_unit(unit_name,
"cat /var/run/reboot-required")
rdict = await model.async_run_on_unit(
unit_name, "cat /var/run/reboot-required || true")
if "Stdout" in rdict and "restart" in rdict["Stdout"].lower():
logging.info("dist-upgrade required reboot {}".format(unit_name))
await os_utils.async_reboot(unit_name)

View File

@@ -19,6 +19,12 @@ import logging
import re
import zaza.model
import zaza.utilities.juju
from zaza.openstack.utilities.os_versions import (
OPENSTACK_CODENAMES,
UBUNTU_OPENSTACK_RELEASE,
OPENSTACK_RELEASES_PAIRS,
)
SERVICE_GROUPS = (
@@ -46,7 +52,7 @@ def get_upgrade_candidates(model_name=None, filters=None):
:param filters: List of filter functions to apply
:type filters: List[fn]
:returns: List of application that can have their payload upgraded.
:rtype: []
:rtype: Dict[str, Dict[str, ANY]]
"""
if filters is None:
filters = []
@@ -163,8 +169,8 @@ def get_series_upgrade_groups(model_name=None, extra_filters=None):
:param model_name: Name of model to query.
:type model_name: str
:returns: Dict of group lists keyed on group name.
:rtype: collections.OrderedDict
:returns: List of tuples(group name, applications)
:rtype: List[Tuple[str, Dict[str, ANY]]]
"""
filters = [_filter_subordinates]
filters = _apply_extra_filters(filters, extra_filters)
@@ -226,3 +232,110 @@ def extract_charm_name_from_url(charm_url):
"""
charm_name = re.sub(r'-[0-9]+$', '', charm_url.split('/')[-1])
return charm_name.split(':')[-1]
def get_all_principal_applications(model_name=None):
"""Return a list of all the prinical applications in the model.
:param model_name: Optional model name
:type model_name: Optional[str]
:returns: List of principal application names
:rtype: List[str]
"""
status = zaza.utilities.juju.get_full_juju_status(model_name=model_name)
return [application for application in status.applications.keys()
if not status.applications.get(application)['subordinate-to']]
def get_lowest_openstack_version(current_versions):
"""Get the lowest OpenStack version from the list of current versions.
:param current_versions: The list of versions
:type current_versions: List[str]
:returns: the lowest version currently installed.
:rtype: str
"""
lowest_version = 'zebra'
for svc in current_versions.keys():
if current_versions[svc] < lowest_version:
lowest_version = current_versions[svc]
return lowest_version
def determine_next_openstack_release(release):
"""Determine the next release after the one passed as a str.
The returned value is a tuple of the form: ('2020.1', 'ussuri')
:param release: the release to use as the base
:type release: str
:returns: the release tuple immediately after the current one.
:rtype: Tuple[str, str]
:raises: KeyError if the current release doesn't actually exist
"""
old_index = list(OPENSTACK_CODENAMES.values()).index(release)
new_index = old_index + 1
return list(OPENSTACK_CODENAMES.items())[new_index]
def determine_new_source(ubuntu_version, current_source, new_release,
single_increment=True):
"""Determine the new source/openstack-origin value based on new release.
This takes the ubuntu_version and the current_source (in the form of
'distro' or 'cloud:xenial-mitaka') and either converts it to a new source,
or returns None if the new_release will match the current_source (i.e. it's
already at the right release), or it's simply not possible.
If single_increment is set, then the returned source will only be returned
if the new_release is one more than the release in the current source.
:param ubuntu_version: the ubuntu version that the app is installed on.
:type ubuntu_version: str
:param current_source: a source in the form of 'distro' or
'cloud:xenial-mitaka'
:type current_source: str
:param new_release: a new OpenStack version codename. e.g. 'stein'
:type new_release: str
:param single_increment: If True, only allow single increment upgrade.
:type single_increment: boolean
:returns: The new source in the form of 'cloud:bionic-train' or None if not
possible
:rtype: Optional[str]
:raises: KeyError if any of the strings don't correspond to known values.
"""
logging.warn("determine_new_source: locals: %s", locals())
if current_source == 'distro':
# convert to a ubuntu-openstack pair
current_source = "cloud:{}-{}".format(
ubuntu_version, UBUNTU_OPENSTACK_RELEASE[ubuntu_version])
# strip out the current openstack version
if ':' not in current_source:
current_source = "cloud:{}-{}".format(ubuntu_version, current_source)
pair = current_source.split(':')[1]
u_version, os_version = pair.split('-', 2)
if u_version != ubuntu_version:
logging.warn("determine_new_source: ubuntu_versions don't match: "
"%s != %s" % (ubuntu_version, u_version))
return None
# determine versions
openstack_codenames = list(OPENSTACK_CODENAMES.values())
old_index = openstack_codenames.index(os_version)
try:
new_os_version = openstack_codenames[old_index + 1]
except IndexError:
logging.warn("determine_new_source: no OpenStack version after "
"'%s'" % os_version)
return None
if single_increment and new_release != new_os_version:
logging.warn("determine_new_source: requested version '%s' not a "
"single increment from '%s' which is '%s'" % (
new_release, os_version, new_os_version))
return None
# now check that there is a combination of u_version-new_os_version
new_pair = "{}_{}".format(u_version, new_os_version)
if new_pair not in OPENSTACK_RELEASES_PAIRS:
logging.warn("determine_new_source: now release pair candidate for "
" combination cloud:%s-%s" % (u_version, new_os_version))
return None
return "cloud:{}-{}".format(u_version, new_os_version)