Merge branch 'master' into ovs-mark-managed-ports

This commit is contained in:
Aurelien Lourot
2020-07-03 15:22:49 +02:00
65 changed files with 3674 additions and 872 deletions
+1
View File
@@ -0,0 +1 @@
recursive-include zaza/openstack *.j2
+2
View File
@@ -29,6 +29,7 @@ install_require = [
'async_generator',
'boto3',
'cryptography',
'dnspython',
'hvac<0.7.0',
'jinja2',
'juju',
@@ -107,6 +108,7 @@ setup(
license='Apache-2.0: http://www.apache.org/licenses/LICENSE-2.0',
packages=find_packages(exclude=["unit_tests"]),
zip_safe=False,
include_package_data=True,
cmdclass={'test': Tox},
install_requires=install_require,
extras_require={
+44
View File
@@ -0,0 +1,44 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import zaza.openstack.charm_tests.test_utils as test_utils
from unittest.mock import patch
class TestOpenStackBaseTest(unittest.TestCase):
    """Unit tests for test_utils.OpenStackBaseTest class setup."""

    @patch.object(test_utils.openstack_utils, 'get_cacert')
    @patch.object(test_utils.openstack_utils, 'get_overcloud_keystone_session')
    @patch.object(test_utils.BaseCharmTest, 'setUpClass')
    def test_setUpClass(self, mock_setupclass, mock_get_session, mock_cacert):
        """setUpClass must delegate its arguments to BaseCharmTest."""
        class _Derived(test_utils.OpenStackBaseTest):
            model_name = 'deadbeef'

        _Derived.setUpClass('foo', 'bar')
        mock_setupclass.assert_called_with('foo', 'bar')
class TestUtils(unittest.TestCase):
    """Unit tests for helper functions in test_utils."""

    def test_format_addr(self):
        """format_addr passes IPv4 through and brackets IPv6 addresses."""
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual('1.2.3.4', test_utils.format_addr('1.2.3.4'))
        self.assertEqual(
            '[2001:db8::42]', test_utils.format_addr('2001:db8::42'))
        # Malformed addresses must raise ValueError.
        with self.assertRaises(ValueError):
            test_utils.format_addr('999.999.999.999')
        with self.assertRaises(ValueError):
            test_utils.format_addr('2001:db8::g')
@@ -1,312 +0,0 @@
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unit_tests.utils as ut_utils
from zaza.openstack.utilities import juju as juju_utils
class TestJujuUtils(ut_utils.BaseTestCase):
    """Unit tests for zaza.openstack.utilities.juju.

    setUp builds a mocked Juju status/model/controller so each test can
    exercise a juju_utils helper without a live controller.
    """

    def setUp(self):
        """Build mocked Juju status, model and controller fixtures."""
        super(TestJujuUtils, self).setUp()
        # Juju Status Object and data
        self.key = "instance-id"
        self.key_data = "machine-uuid"
        self.machine = "1"
        self.machine_data = {self.key: self.key_data}
        self.unit = "app/1"
        self.unit_data = {"machine": self.machine}
        self.application = "app"
        self.application_data = {"units": {self.unit: self.unit_data}}
        self.subordinate_application = "subordinate_application"
        self.subordinate_application_data = {
            "subordinate-to": [self.application]}
        self.juju_status = mock.MagicMock()
        self.juju_status.name = "juju_status_object"
        self.juju_status.applications.get.return_value = self.application_data
        self.juju_status.machines.get.return_value = self.machine_data
        # Model
        self.patch_object(juju_utils, "model")
        self.model_name = "model-name"
        self.model.get_juju_model.return_value = self.model_name
        self.model.get_status.return_value = self.juju_status
        self.run_output = {"Code": "0", "Stderr": "", "Stdout": "RESULT"}
        self.error_run_output = {"Code": "1", "Stderr": "ERROR", "Stdout": ""}
        self.model.run_on_unit.return_value = self.run_output
        # Clouds
        self.cloud_name = "FakeCloudName"
        self.cloud_type = "FakeCloudType"
        self.clouds = {
            "clouds":
                {self.cloud_name:
                    {"type": self.cloud_type}}}
        # Controller
        self.patch_object(juju_utils, "controller")
        self.controller.get_cloud.return_value = self.cloud_name

    def test_get_application_status(self):
        """get_application_status narrows full status to app or unit data."""
        self.patch_object(juju_utils, "get_full_juju_status")
        self.get_full_juju_status.return_value = self.juju_status
        # Full status juju object return
        self.assertEqual(
            juju_utils.get_application_status(), self.juju_status)
        self.get_full_juju_status.assert_called_once()
        # Application only dictionary return
        self.assertEqual(
            juju_utils.get_application_status(application=self.application),
            self.application_data)
        # Unit no application dictionary return
        self.assertEqual(
            juju_utils.get_application_status(unit=self.unit),
            self.unit_data)

    def test_get_cloud_configs(self):
        """get_cloud_configs returns all clouds or one named cloud."""
        self.patch_object(juju_utils.Path, "home")
        self.patch_object(juju_utils.generic_utils, "get_yaml_config")
        self.get_yaml_config.return_value = self.clouds
        # All the cloud configs
        self.assertEqual(juju_utils.get_cloud_configs(), self.clouds)
        # With cloud specified
        self.assertEqual(juju_utils.get_cloud_configs(self.cloud_name),
                         self.clouds["clouds"][self.cloud_name])

    def test_get_full_juju_status(self):
        """get_full_juju_status proxies model.get_status."""
        self.assertEqual(juju_utils.get_full_juju_status(), self.juju_status)
        self.model.get_status.assert_called_once_with(model_name=None)

    def test_get_machines_for_application(self):
        """get_machines_for_application yields machines; follows subordinates."""
        self.patch_object(juju_utils, "get_application_status")
        self.get_application_status.return_value = self.application_data
        # Machine data
        self.assertEqual(
            next(juju_utils.get_machines_for_application(self.application)),
            self.machine)
        self.get_application_status.assert_called_once()

        # Subordinate application has no units
        def _get_application_status(application, model_name=None):
            # Return canned status for the principal or the subordinate.
            _apps = {
                self.application: self.application_data,
                self.subordinate_application:
                    self.subordinate_application_data}
            return _apps[application]
        self.get_application_status.side_effect = _get_application_status
        self.assertEqual(
            next(juju_utils.get_machines_for_application(
                self.subordinate_application)),
            self.machine)

    def test_get_unit_name_from_host_name(self):
        """get_unit_name_from_host_name maps a machine hostname to a unit."""
        unit_mock1 = mock.MagicMock()
        unit_mock1.data = {'machine-id': 12}
        unit_mock1.entity_id = 'myapp/2'
        unit_mock2 = mock.MagicMock()
        unit_mock2.data = {'machine-id': 15}
        unit_mock2.entity_id = 'myapp/5'
        self.model.get_units.return_value = [unit_mock1, unit_mock2]
        # Hostname 'juju-model-12' carries machine id 12 -> myapp/2.
        self.assertEqual(
            juju_utils.get_unit_name_from_host_name('juju-model-12', 'myapp'),
            'myapp/2')

    def test_get_machine_status(self):
        """get_machine_status returns all data or one requested key."""
        self.patch_object(juju_utils, "get_full_juju_status")
        self.get_full_juju_status.return_value = self.juju_status
        # All machine data
        self.assertEqual(
            juju_utils.get_machine_status(self.machine),
            self.machine_data)
        self.get_full_juju_status.assert_called_once()
        # Request a specific key
        self.assertEqual(
            juju_utils.get_machine_status(self.machine, self.key),
            self.key_data)

    def test_get_machine_uuids_for_application(self):
        """get_machine_uuids_for_application yields machine instance-ids."""
        self.patch_object(juju_utils, "get_machines_for_application")
        self.get_machines_for_application.return_value = [self.machine]
        self.assertEqual(
            next(juju_utils.get_machine_uuids_for_application(
                self.application)),
            self.machine_data.get("instance-id"))
        self.get_machines_for_application.assert_called_once_with(
            self.application, model_name=None)

    def test_get_provider_type(self):
        """get_provider_type reads the cloud type from the cloud config."""
        self.patch_object(juju_utils, "get_cloud_configs")
        self.get_cloud_configs.return_value = {"type": self.cloud_type}
        self.assertEqual(juju_utils.get_provider_type(),
                         self.cloud_type)
        self.get_cloud_configs.assert_called_once_with(self.cloud_name)

    def test_remote_run(self):
        """remote_run returns stdout on success; stderr or raise on failure."""
        _cmd = "do the thing"
        # Success
        self.assertEqual(juju_utils.remote_run(self.unit, _cmd),
                         self.run_output["Stdout"])
        self.model.run_on_unit.assert_called_once_with(
            self.unit, _cmd, timeout=None, model_name=None)
        # Non-fatal failure
        self.model.run_on_unit.return_value = self.error_run_output
        self.assertEqual(
            juju_utils.remote_run(
                self.unit,
                _cmd,
                fatal=False,
                model_name=None),
            self.error_run_output["Stderr"])
        # Fatal failure
        with self.assertRaises(Exception):
            juju_utils.remote_run(self.unit, _cmd, fatal=True)

    def test_get_unit_names(self):
        """_get_unit_names leaves names that already look like units alone."""
        self.patch('zaza.model.get_first_unit_name', new_callable=mock.Mock(),
                   name='_get_first_unit_name')
        juju_utils._get_unit_names(['aunit/0', 'otherunit/0'])
        self.assertFalse(self._get_first_unit_name.called)

    def test_get_unit_names_called_with_application_name(self):
        """_get_unit_names resolves bare application names via the model."""
        self.patch_object(juju_utils, 'model')
        juju_utils._get_unit_names(['aunit', 'otherunit/0'])
        self.model.get_first_unit_name.assert_called()

    def test_get_relation_from_unit(self):
        """get_relation_from_unit runs relation-get and parses its YAML."""
        self.patch_object(juju_utils, '_get_unit_names')
        self.patch_object(juju_utils, 'yaml')
        self.patch_object(juju_utils, 'model')
        self._get_unit_names.return_value = ['aunit/0', 'otherunit/0']
        data = {'foo': 'bar'}
        self.model.get_relation_id.return_value = 42
        self.model.run_on_unit.return_value = {'Code': 0, 'Stdout': str(data)}
        juju_utils.get_relation_from_unit('aunit/0', 'otherunit/0',
                                          'arelation')
        self.model.run_on_unit.assert_called_with(
            'aunit/0',
            'relation-get --format=yaml -r "42" - "otherunit/0"',
            model_name=None)
        self.yaml.safe_load.assert_called_with(str(data))

    def test_get_relation_from_unit_fails(self):
        """get_relation_from_unit raises if the remote command fails."""
        self.patch_object(juju_utils, '_get_unit_names')
        self.patch_object(juju_utils, 'yaml')
        self.patch_object(juju_utils, 'model')
        self._get_unit_names.return_value = ['aunit/0', 'otherunit/0']
        self.model.get_relation_id.return_value = 42
        self.model.run_on_unit.return_value = {'Code': 1, 'Stderr': 'ERROR'}
        with self.assertRaises(Exception):
            juju_utils.get_relation_from_unit('aunit/0', 'otherunit/0',
                                              'arelation')
        self.model.run_on_unit.assert_called_with(
            'aunit/0',
            'relation-get --format=yaml -r "42" - "otherunit/0"',
            model_name=None)
        # YAML parsing must not be attempted on a failed run.
        self.assertFalse(self.yaml.safe_load.called)

    def test_leader_get(self):
        """leader_get fetches and parses the whole leader data bag."""
        self.patch_object(juju_utils, 'yaml')
        self.patch_object(juju_utils, 'model')
        data = {'foo': 'bar'}
        self.model.run_on_leader.return_value = {
            'Code': 0, 'Stdout': str(data)}
        juju_utils.leader_get('application')
        self.model.run_on_leader.assert_called_with(
            'application', 'leader-get --format=yaml ', model_name=None)
        self.yaml.safe_load.assert_called_with(str(data))

    def test_leader_get_key(self):
        """leader_get appends a requested key to the leader-get command."""
        self.patch_object(juju_utils, 'yaml')
        self.patch_object(juju_utils, 'model')
        data = {'foo': 'bar'}
        self.model.run_on_leader.return_value = {
            'Code': 0, 'Stdout': data['foo']}
        juju_utils.leader_get('application', 'foo')
        self.model.run_on_leader.assert_called_with(
            'application', 'leader-get --format=yaml foo', model_name=None)
        self.yaml.safe_load.assert_called_with(data['foo'])

    def test_leader_get_fails(self):
        """leader_get raises when leader-get fails on the leader."""
        self.patch_object(juju_utils, 'yaml')
        self.patch_object(juju_utils, 'model')
        self.model.run_on_leader.return_value = {
            'Code': 1, 'Stderr': 'ERROR'}
        with self.assertRaises(Exception):
            juju_utils.leader_get('application')
        self.model.run_on_leader.assert_called_with(
            'application', 'leader-get --format=yaml ',
            model_name=None)
        # YAML parsing must not be attempted on a failed run.
        self.assertFalse(self.yaml.safe_load.called)

    def test_get_machine_series(self):
        """get_machine_series reads the 'series' key from machine status."""
        self.patch(
            'zaza.openstack.utilities.juju.get_machine_status',
            new_callable=mock.MagicMock(),
            name='_get_machine_status'
        )
        self._get_machine_status.return_value = 'xenial'
        expected = 'xenial'
        actual = juju_utils.get_machine_series('6')
        self._get_machine_status.assert_called_with(
            machine='6',
            key='series',
            model_name=None
        )
        self.assertEqual(expected, actual)

    def test_get_subordinate_units(self):
        """get_subordinate_units lists subordinates, optionally by charm."""
        juju_status = mock.MagicMock()
        juju_status.applications = {
            'nova-compute': {
                'units': {
                    'nova-compute/0': {
                        'subordinates': {
                            'neutron-openvswitch/2': {
                                'charm': 'cs:neutron-openvswitch-22'}}}}},
            'cinder': {
                'units': {
                    'cinder/1': {
                        'subordinates': {
                            'cinder-hacluster/0': {
                                'charm': 'cs:hacluster-42'},
                            'cinder-ceph/3': {
                                'charm': 'cs:cinder-ceph-2'}}}}},
        }
        # Without a charm filter: every subordinate of the named units.
        self.assertEqual(
            sorted(juju_utils.get_subordinate_units(
                ['nova-compute/0', 'cinder/1'],
                status=juju_status)),
            sorted(['neutron-openvswitch/2', 'cinder-hacluster/0',
                    'cinder-ceph/3']))
        # With charm_name='ceph': only subordinates whose charm matches.
        self.assertEqual(
            juju_utils.get_subordinate_units(
                ['nova-compute/0', 'cinder/1'],
                charm_name='ceph',
                status=juju_status),
            ['cinder-ceph/3'])
@@ -293,8 +293,9 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.patch_object(openstack_utils.urllib.request, "ProxyHandler")
self.patch_object(openstack_utils.urllib.request, "HTTPHandler")
self.patch_object(openstack_utils.urllib.request, "build_opener")
self.patch_object(openstack_utils.os, "getenv")
self.getenv.return_value = None
self.patch_object(openstack_utils.deployment_env,
"get_deployment_context",
return_value=dict(TEST_HTTP_PROXY=None))
HTTPHandler_mock = mock.MagicMock()
self.HTTPHandler.return_value = HTTPHandler_mock
openstack_utils.get_urllib_opener()
@@ -305,8 +306,9 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.patch_object(openstack_utils.urllib.request, "ProxyHandler")
self.patch_object(openstack_utils.urllib.request, "HTTPHandler")
self.patch_object(openstack_utils.urllib.request, "build_opener")
self.patch_object(openstack_utils.os, "getenv")
self.getenv.return_value = 'http://squidy'
self.patch_object(openstack_utils.deployment_env,
"get_deployment_context",
return_value=dict(TEST_HTTP_PROXY='http://squidy'))
ProxyHandler_mock = mock.MagicMock()
self.ProxyHandler.return_value = ProxyHandler_mock
openstack_utils.get_urllib_opener()
@@ -738,7 +740,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
'bob',
'10.0.0.10',
'myvm',
password='reallyhardpassord')
password='reallyhardpassord',
retry=False)
paramiko_mock.connect.assert_called_once_with(
'10.0.0.10',
password='reallyhardpassord',
@@ -1215,3 +1218,33 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.assertTrue(openstack_utils.ovn_present())
self.get_application.side_effect = [KeyError, KeyError]
self.assertFalse(openstack_utils.ovn_present())
def test_configure_gateway_ext_port(self):
    """Exercise configure_gateway_ext_port's gateway discovery path.

    FIXME: this is not a complete unit test for the function as one did
    not exist at all. I'm adding this to test one bit and we'll add more
    as we go.
    """
    self.patch_object(openstack_utils, 'deprecated_external_networking')
    self.patch_object(openstack_utils, 'dvr_enabled')
    self.patch_object(openstack_utils, 'ovn_present')
    self.patch_object(openstack_utils, 'get_gateway_uuids')
    self.patch_object(openstack_utils, 'get_admin_net')
    # NOTE: the previous `self.dvr_enabled = False` only rebound the local
    # reference to the patched mock; it did not make the patched function
    # report False to the code under test. Configure the mocks' return
    # values instead so neither DVR nor OVN is detected.
    self.dvr_enabled.return_value = False
    self.ovn_present.return_value = False
    self.get_admin_net.return_value = {'id': 'fakeid'}
    novaclient = mock.MagicMock()
    neutronclient = mock.MagicMock()

    def _fake_empty_generator(empty=True):
        # Yields nothing by default, emulating "no gateways found".
        if empty:
            return
        yield

    self.get_gateway_uuids.side_effect = _fake_empty_generator
    # No gateway uuids available -> the function must raise.
    with self.assertRaises(RuntimeError):
        openstack_utils.configure_gateway_ext_port(
            novaclient, neutronclient)
    # provide a uuid and check that we don't raise RuntimeError
    self.get_gateway_uuids.side_effect = ['fake-uuid']
    openstack_utils.configure_gateway_ext_port(
        novaclient, neutronclient)
+78 -1
View File
@@ -19,7 +19,11 @@
import logging
import tenacity
import novaclient.exceptions
import zaza.model
import zaza.openstack.configure.guest
import zaza.openstack.charm_tests.glance.setup as glance_setup
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.configure.telemetry as telemetry_utils
@@ -33,7 +37,7 @@ class AodhTest(test_utils.OpenStackBaseTest):
@classmethod
def setUpClass(cls):
"""Run class setup for running tests."""
super(AodhTest, cls).setUpClass()
super(AodhTest, cls).setUpClass(application_name='aodh')
cls.xenial_ocata = openstack_utils.get_os_release('xenial_ocata')
cls.xenial_newton = openstack_utils.get_os_release('xenial_newton')
cls.bionic_stein = openstack_utils.get_os_release('bionic_stein')
@@ -134,3 +138,76 @@ class AodhTest(test_utils.OpenStackBaseTest):
pgrep_full=False):
logging.info("Testing pause resume")
self.query_aodh_api()
class AodhServerAlarmTest(test_utils.OpenStackBaseTest):
    """Test server events trigger Aodh alarms."""

    # Name prefix used to recognise resources created by this test class.
    RESOURCE_PREFIX = 'zaza-aodhtests'

    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super(AodhServerAlarmTest, cls).setUpClass(application_name='aodh')
        cls.aodh_client = openstack_utils.get_aodh_session_client(
            cls.keystone_session)
        cls.nova_client = openstack_utils.get_nova_session_client(
            cls.keystone_session)
        # Request that resource_cleanup runs after the tests complete.
        cls.run_resource_cleanup = True

    @classmethod
    def resource_cleanup(cls):
        """Remove test resources (alarms and servers carrying our prefix)."""
        logging.info('Running teardown')
        for alarm in cls.aodh_client.alarm.list():
            if alarm['name'].startswith(cls.RESOURCE_PREFIX):
                logging.info('Removing Alarm {}'.format(alarm['name']))
                telemetry_utils.delete_alarm(
                    cls.aodh_client,
                    alarm['name'],
                    cache_wait=False)
        for server in cls.nova_client.servers.list():
            if server.name.startswith(cls.RESOURCE_PREFIX):
                logging.info('Removing server {}'.format(server.name))
                openstack_utils.delete_resource(
                    cls.nova_client.servers,
                    server.id,
                    msg="server")

    def test_alarm_on_power_off(self):
        """Test server alarm is triggered when server is powered off."""
        server_name = '{}-server'.format(self.RESOURCE_PREFIX)
        alarm_name = '{}_instance_off'.format(self.RESOURCE_PREFIX)
        # Reuse an existing test server if one is present; otherwise launch.
        try:
            server = self.nova_client.servers.find(name=server_name)
            logging.info("Found existing server {}".format(server_name))
        except novaclient.exceptions.NotFound:
            logging.info("Launching new server {}".format(server_name))
            server = zaza.openstack.configure.guest.launch_instance(
                glance_setup.LTS_IMAGE_NAME,
                vm_name=server_name)
        assert server.status == 'ACTIVE', "Server {} not active".format(
            server.name)
        # Start from a known state: remove any stale alarm of this name.
        logging.info('Deleting alarm {} if it exists'.format(alarm_name))
        telemetry_utils.delete_alarm(
            self.aodh_client,
            alarm_name,
            cache_wait=True)
        logging.info('Creating alarm {}'.format(alarm_name))
        alarm_info = telemetry_utils.create_server_power_off_alarm(
            self.aodh_client,
            alarm_name,
            server.id)
        alarm_state = telemetry_utils.get_alarm_state(
            self.aodh_client,
            alarm_info['alarm_id'])
        logging.info('Alarm in state {}'.format(alarm_state))
        # Until data is collected alarm come up in an 'insufficient data'
        # state.
        self.assertEqual(alarm_state, 'insufficient data')
        logging.info('Stopping server {}'.format(server.name))
        server.stop()
        # Block until the alarm leaves its initial state after power-off.
        telemetry_utils.block_until_alarm_state(
            self.aodh_client,
            alarm_info['alarm_id'])
+1 -1
View File
@@ -22,7 +22,7 @@ import zaza.openstack.utilities.openstack as openstack_utils
class BarbicanTest(test_utils.OpenStackBaseTest):
"""Run nova-compute specific tests."""
"""Run barbican specific tests."""
_SERVICES = ['apache2', 'barbican-worker']
@@ -28,11 +28,11 @@ def basic_setup():
tests.
"""
current_release = openstack_utils.get_os_release()
xenial_pike = openstack_utils.get_os_release('xenial_pike')
xenial_ocata = openstack_utils.get_os_release('xenial_ocata')
if current_release < xenial_pike:
if current_release < xenial_ocata:
logging.info(
'Skipping ceilometer-upgrade as it is not supported before Pike')
'Skipping ceilometer-upgrade as it is not supported before ocata')
return
logging.debug('Checking ceilometer-upgrade')
@@ -101,7 +101,7 @@ class CeilometerTest(test_utils.OpenStackBaseTest):
def test_400_api_connection(self):
"""Simple api calls to check service is up and responding."""
if self.current_release >= CeilometerTest.XENIAL_PIKE:
if self.current_release >= CeilometerTest.XENIAL_OCATA:
logging.info('Skipping API checks as ceilometer api has been '
'removed')
return
+20 -22
View File
@@ -14,10 +14,10 @@
"""Encapsulate CephFS testing."""
import logging
from tenacity import Retrying, stop_after_attempt, wait_exponential
import zaza.model as model
import zaza.openstack.charm_tests.glance.setup as glance_setup
import zaza.openstack.charm_tests.neutron.tests as neutron_tests
import zaza.openstack.charm_tests.nova.utils as nova_utils
import zaza.openstack.charm_tests.test_utils as test_utils
@@ -63,27 +63,10 @@ write_files:
conf = model.run_on_leader(
'ceph-mon', 'cat /etc/ceph/ceph.conf')['Stdout']
# Spawn Servers
for attempt in Retrying(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=2, max=10)):
with attempt:
instance_1 = guest.launch_instance(
glance_setup.LTS_IMAGE_NAME,
vm_name='{}-ins-1'.format(self.RESOURCE_PREFIX),
userdata=self.INSTANCE_USERDATA.format(
_indent(conf, 8),
_indent(keyring, 8)))
for attempt in Retrying(
stop=stop_after_attempt(3),
wait=wait_exponential(multiplier=1, min=2, max=10)):
with attempt:
instance_2 = guest.launch_instance(
glance_setup.LTS_IMAGE_NAME,
vm_name='{}-ins-2'.format(self.RESOURCE_PREFIX),
userdata=self.INSTANCE_USERDATA.format(
_indent(conf, 8),
_indent(keyring, 8)))
instance_1, instance_2 = self.launch_guests(
userdata=self.INSTANCE_USERDATA.format(
_indent(conf, 8),
_indent(keyring, 8)))
# Write a file on instance_1
def verify_setup(stdin, stdout, stderr):
@@ -124,3 +107,18 @@ write_files:
def _indent(text, amount, ch=' '):
padding = amount * ch
return ''.join(padding+line for line in text.splitlines(True))
class CharmOperationTest(test_utils.BaseCharmTest):
    """CephFS Charm operation tests."""

    def test_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped, then resume and check
        they are started.
        """
        svc_list = ['ceph-mds']
        with self.pause_resume(svc_list):
            message = 'Testing pause resume (services="{}")'.format(svc_list)
            logging.info(message)
@@ -0,0 +1,15 @@
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing ceph-mon for cinder-ceph."""
@@ -0,0 +1,200 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ceph-mon Testing for cinder-ceph."""
import logging
import zaza.model
from zaza.openstack.utilities import (
generic as generic_utils,
openstack as openstack_utils,
exceptions as zaza_exceptions
)
import zaza.openstack.charm_tests.test_utils as test_utils
class CinderCephMonTest(test_utils.OpenStackBaseTest):
    """Verify that the ceph mon units are healthy."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running ceph mon tests with cinder."""
        super().setUpClass()

    # ported from the cinder-ceph Amulet test
    def test_499_ceph_cmds_exit_zero(self):
        """Verify expected state with security-checklist."""
        logging.info("Checking exit values are 0 on ceph commands.")
        units = zaza.model.get_units("ceph-mon", model_name=self.model_name)
        current_release = openstack_utils.get_os_release()
        bionic_train = openstack_utils.get_os_release('bionic_train')
        # Before bionic-train the cinder-ceph units are also checked —
        # presumably they carry the ceph CLI on those releases; TODO confirm.
        if current_release < bionic_train:
            units.extend(zaza.model.get_units("cinder-ceph",
                                              model_name=self.model_name))
        commands = [
            'sudo ceph health',
            'sudo ceph mds stat',
            'sudo ceph pg stat',
            'sudo ceph osd stat',
            'sudo ceph mon stat',
        ]
        for unit in units:
            run_commands(unit.name, commands)

    # ported from the cinder-ceph Amulet test
    def test_500_ceph_alternatives_cleanup(self):
        """Check ceph alternatives removed when ceph-mon relation is broken."""
        # Skip this test if release is less than xenial_ocata as in that case
        # cinder HAS a relation with ceph directly and this test would fail
        current_release = openstack_utils.get_os_release()
        xenial_ocata = openstack_utils.get_os_release('xenial_ocata')
        if current_release < xenial_ocata:
            logging.info("Skipping test as release < xenial-ocata")
            return
        units = zaza.model.get_units("cinder-ceph",
                                     model_name=self.model_name)
        # check each unit prior to breaking relation
        for unit in units:
            dir_list = directory_listing(unit.name, "/etc/ceph")
            if 'ceph.conf' in dir_list:
                logging.debug(
                    "/etc/ceph/ceph.conf exists BEFORE relation-broken")
            else:
                raise zaza_exceptions.CephGenericError(
                    "unit: {} - /etc/ceph/ceph.conf does not exist "
                    "BEFORE relation-broken".format(unit.name))
        # remove the relation so that /etc/ceph/ceph.conf is removed
        logging.info("Removing ceph-mon:client <-> cinder-ceph:ceph relation")
        zaza.model.remove_relation(
            "ceph-mon", "ceph-mon:client", "cinder-ceph:ceph")
        # zaza.model.wait_for_agent_status()
        logging.info("Wait till relation is removed...")
        ceph_mon_units = zaza.model.get_units("ceph-mon",
                                              model_name=self.model_name)
        # Block until no ceph-mon unit reports the relation any more.
        conditions = [
            invert_condition(
                does_relation_exist(
                    u.name, "ceph-mon", "cinder-ceph", "ceph",
                    self.model_name))
            for u in ceph_mon_units]
        zaza.model.block_until(*conditions)
        logging.info("Checking each unit after breaking relation...")
        for unit in units:
            dir_list = directory_listing(unit.name, "/etc/ceph")
            if 'ceph.conf' not in dir_list:
                logging.debug(
                    "/etc/ceph/ceph.conf removed AFTER relation-broken")
            else:
                raise zaza_exceptions.CephGenericError(
                    "unit: {} - /etc/ceph/ceph.conf still exists "
                    "AFTER relation-broken".format(unit.name))
        # Restore cinder-ceph and ceph-mon relation to keep tests idempotent
        logging.info("Restoring ceph-mon:client <-> cinder-ceph:ceph relation")
        zaza.model.add_relation(
            "ceph-mon", "ceph-mon:client", "cinder-ceph:ceph")
        # Block until every ceph-mon unit sees the relation again.
        conditions = [
            does_relation_exist(
                u.name, "ceph-mon", "cinder-ceph", "ceph", self.model_name)
            for u in ceph_mon_units]
        logging.info("Wait till model is idle ...")
        zaza.model.block_until(*conditions)
        zaza.model.block_until_all_units_idle()
        logging.info("... Done.")
def does_relation_exist(unit_name,
                        application_name,
                        remote_application_name,
                        remote_interface_name,
                        model_name):
    """For use in async blocking function, return True if it exists.

    :param unit_name: the unit (by name) that to check on.
    :type unit_name: str
    :param application_name: Name of application on this side of relation
    :type application_name: str
    :param remote_application_name: the relation name at that unit to check for
    :type remote_application_name: str
    :param remote_interface_name: the interface name at that unit to check for
    :type remote_interface_name: str
    :param model_name: the model to check on
    :type model_name: str
    :returns: Coroutine function that returns True if the relation was found
    :rtype: Coroutine[[], boolean]
    """
    async def _async_does_relation_exist_closure():
        async with zaza.model.run_in_model(model_name) as model:
            # Relations are matched against an "<app>:<interface>" spec.
            spec = "{}:{}".format(
                remote_application_name, remote_interface_name)
            for rel in model.applications[application_name].relations:
                if rel.matches(spec):
                    return True
            return False
    # Return the (uncalled) coroutine function so blocking helpers can
    # await it repeatedly.
    return _async_does_relation_exist_closure
def invert_condition(async_condition):
    """Invert the condition provided so it can be provided to the blocking fn.

    :param async_condition: the async callable that is the test
    :type async_condition: Callable[]
    :returns: Coroutine function evaluating to the negation of the callable
    :rtype: Coroutine[[], bool]
    """
    async def _negated():
        outcome = await async_condition()
        return not outcome
    return _negated
def run_commands(unit_name, commands):
    """Run commands on unit.

    Execute each command on the given unit, collecting any failures, and
    raise a single error describing all of them at the end.
    """
    failures = []
    for command in commands:
        try:
            generic_utils.assertRemoteRunOK(zaza.model.run_on_unit(
                unit_name,
                command))
        except Exception as exc:
            failures.append("unit: {}, command: {}, error: {}"
                            .format(unit_name, command, str(exc)))
    if failures:
        raise zaza_exceptions.CephGenericError("\n".join(failures))
def directory_listing(unit_name, directory):
    """Return a list of files/directories from a directory on a unit.

    :param unit_name: the unit to fetch the directory listing from
    :type unit_name: str
    :param directory: the directory to fetch the listing from
    :type directory: str
    :returns: A listing using "ls -1" on the unit
    :rtype: List[str]
    """
    listing_cmd = "ls -1 {}".format(directory)
    outcome = zaza.model.run_on_unit(unit_name, listing_cmd)
    return outcome['Stdout'].splitlines()
@@ -23,7 +23,9 @@ import zaza.model
import zaza.openstack.utilities.ceph
import zaza.openstack.utilities.openstack as openstack
from zaza.openstack.charm_tests.glance.setup import LTS_IMAGE_NAME
from zaza.openstack.charm_tests.glance.setup import (
LTS_IMAGE_NAME,
CIRROS_IMAGE_NAME)
class CephRBDMirrorBase(test_utils.OpenStackBaseTest):
@@ -197,7 +199,14 @@ class CephRBDMirrorTest(CephRBDMirrorBase):
glance = openstack.get_glance_session_client(session)
cinder = openstack.get_cinder_session_client(session)
image = next(glance.images.list(name=LTS_IMAGE_NAME))
images = openstack.get_images_by_name(glance, CIRROS_IMAGE_NAME)
if images:
image = images[0]
else:
logging.info("Failed to find {} image, falling back to {}".format(
CIRROS_IMAGE_NAME,
LTS_IMAGE_NAME))
image = openstack.get_images_by_name(glance, LTS_IMAGE_NAME)[0]
# NOTE(fnordahl): for some reason create volume from image often fails
# when run just after deployment is finished. We should figure out
@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test charm upgrade."""
@@ -0,0 +1,78 @@
#!/usr/bin/env python3
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define class for Charm Upgrade."""
import logging
import unittest
import zaza.model
from zaza.openstack.utilities import (
cli as cli_utils,
upgrade_utils as upgrade_utils,
)
from zaza.openstack.charm_tests.nova.tests import LTSGuestCreateTest
class FullCloudCharmUpgradeTest(unittest.TestCase):
    """Class to encapsulate Charm Upgrade Tests."""

    @classmethod
    def setUpClass(cls):
        """Run setup for Charm Upgrades."""
        cli_utils.setup_logging()
        cls.lts = LTSGuestCreateTest()
        cls.target_charm_namespace = '~openstack-charmers-next'

    def get_upgrade_url(self, charm_url):
        """Return the charm_url to upgrade to.

        :param charm_url: Current charm url.
        :type charm_url: str
        :returns: Charm url in the target namespace to upgrade to.
        :rtype: str
        """
        charm_name = upgrade_utils.extract_charm_name_from_url(
            charm_url)
        next_charm_url = zaza.model.get_latest_charm_url(
            "cs:{}/{}".format(self.target_charm_namespace, charm_name))
        return next_charm_url

    def test_200_run_charm_upgrade(self):
        """Run charm upgrade."""
        # Smoke-test the cloud before upgrading anything.
        self.lts.test_launch_small_instance()
        applications = zaza.model.get_status().applications
        groups = upgrade_utils.get_charm_upgrade_groups()
        for group_name, group in groups.items():
            logging.info("About to upgrade {} ({})".format(group_name, group))
            for application, app_details in applications.items():
                if application not in group:
                    continue
                target_url = self.get_upgrade_url(app_details['charm'])
                if target_url == app_details['charm']:
                    # logging.warn is a deprecated alias; use warning().
                    logging.warning(
                        "Skipping upgrade of {}, already using {}".format(
                            application,
                            target_url))
                else:
                    logging.info("Upgrading {} to {}".format(
                        application,
                        target_url))
                    zaza.model.upgrade_charm(
                        application,
                        switch=target_url)
                    logging.info("Waiting for charm url to update")
                    zaza.model.block_until_charm_url(application, target_url)
            # Let the whole group settle before starting the next one.
            zaza.model.block_until_all_units_idle()
        # Smoke-test again after the upgrades completed.
        self.lts.test_launch_small_instance()
+90 -23
View File
@@ -23,6 +23,12 @@ import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.glance.setup as glance_setup
from tenacity import (
Retrying,
stop_after_attempt,
wait_exponential,
)
class CinderTests(test_utils.OpenStackBaseTest):
"""Encapsulate Cinder tests."""
@@ -32,7 +38,10 @@ class CinderTests(test_utils.OpenStackBaseTest):
@classmethod
def setUpClass(cls):
"""Run class setup for running tests."""
super(CinderTests, cls).setUpClass()
super(CinderTests, cls).setUpClass(application_name='cinder')
cls.application_name = 'cinder'
cls.lead_unit = zaza.model.get_lead_unit_name(
"cinder", model_name=cls.model_name)
cls.cinder_client = openstack_utils.get_cinder_session_client(
cls.keystone_session)
cls.nova_client = openstack_utils.get_nova_session_client(
@@ -42,18 +51,66 @@ class CinderTests(test_utils.OpenStackBaseTest):
def tearDown(cls):
"""Remove test resources."""
logging.info('Running teardown')
for snapshot in cls.cinder_client.volume_snapshots.list():
for attempt in Retrying(
stop=stop_after_attempt(8),
wait=wait_exponential(multiplier=1, min=2, max=60)):
with attempt:
volumes = list(cls.cinder_client.volumes.list())
snapped_volumes = [v for v in volumes
if v.name.endswith("-from-snap")]
if snapped_volumes:
logging.info("Removing volumes from snapshot")
cls._remove_volumes(snapped_volumes)
volumes = list(cls.cinder_client.volumes.list())
snapshots = list(cls.cinder_client.volume_snapshots.list())
if snapshots:
logging.info("tearDown - snapshots: {}".format(
", ".join(s.name for s in snapshots)))
cls._remove_snapshots(snapshots)
if volumes:
logging.info("tearDown - volumes: {}".format(
", ".join(v.name for v in volumes)))
cls._remove_volumes(volumes)
@classmethod
def _remove_snapshots(cls, snapshots):
"""Remove snapshots passed as param.
:param volumes: the snapshots to delete
:type volumes: List[snapshot objects]
"""
for snapshot in snapshots:
if snapshot.name.startswith(cls.RESOURCE_PREFIX):
openstack_utils.delete_resource(
cls.cinder_client.volume_snapshots,
snapshot.id,
msg="snapshot")
for volume in cls.cinder_client.volumes.list():
logging.info("removing snapshot: {}".format(snapshot.name))
try:
openstack_utils.delete_resource(
cls.cinder_client.volume_snapshots,
snapshot.id,
msg="snapshot")
except Exception as e:
logging.error("error removing snapshot: {}".format(str(e)))
raise
@classmethod
def _remove_volumes(cls, volumes):
"""Remove volumes passed as param.
:param volumes: the volumes to delete
:type volumes: List[volume objects]
"""
for volume in volumes:
if volume.name.startswith(cls.RESOURCE_PREFIX):
openstack_utils.delete_resource(
cls.cinder_client.volumes,
volume.id,
msg="volume")
logging.info("removing volume: {}".format(volume.name))
try:
openstack_utils.delete_resource(
cls.cinder_client.volumes,
volume.id,
msg="volume")
except Exception as e:
logging.error("error removing volume: {}".format(str(e)))
raise
def test_100_volume_create_extend_delete(self):
"""Test creating, extending a volume."""
@@ -80,12 +137,18 @@ class CinderTests(test_utils.OpenStackBaseTest):
def test_105_volume_create_from_img(self):
"""Test creating a volume from an image."""
logging.debug("finding image {} ..."
.format(glance_setup.LTS_IMAGE_NAME))
image = self.nova_client.glance.find_image(
glance_setup.LTS_IMAGE_NAME)
logging.debug("using cinder_client to create volume from image {}"
.format(image.id))
vol_img = self.cinder_client.volumes.create(
name='{}-105-vol-from-img'.format(self.RESOURCE_PREFIX),
size=3,
imageRef=image.id)
logging.debug("now waiting for volume {} to reach available"
.format(vol_img.id))
openstack_utils.resource_reaches_status(
self.cinder_client.volumes,
vol_img.id,
@@ -154,12 +217,22 @@ class CinderTests(test_utils.OpenStackBaseTest):
@property
def services(self):
"""Return a list services for the selected OpenStack release."""
services = ['cinder-scheduler', 'cinder-volume']
if (openstack_utils.get_os_release() >=
openstack_utils.get_os_release('xenial_ocata')):
services.append('apache2')
current_value = zaza.model.get_application_config(
self.application_name)['enabled-services']['value']
if current_value == "all":
services = ['cinder-scheduler', 'cinder-volume', 'cinder-api']
else:
services.append('cinder-api')
services = ['cinder-{}'.format(svc)
for svc in ('api', 'scheduler', 'volume')
if svc in current_value]
if ('cinder-api' in services and
(openstack_utils.get_os_release() >=
openstack_utils.get_os_release('xenial_ocata'))):
services.remove('cinder-api')
services.append('apache2')
return services
def test_900_restart_on_config_change(self):
@@ -183,13 +256,7 @@ class CinderTests(test_utils.OpenStackBaseTest):
Pause service and check services are stopped then resume and check
they are started
"""
services = ['cinder-scheduler', 'cinder-volume']
if (openstack_utils.get_os_release() >=
openstack_utils.get_os_release('xenial_ocata')):
services.append('apache2')
else:
services.append('cinder-api')
with self.pause_resume(services):
with self.pause_resume(self.services):
logging.info("Testing pause resume")
+110 -29
View File
@@ -14,14 +14,20 @@
"""Encapsulate designate testing."""
import logging
import unittest
import tenacity
import subprocess
import designateclient.v1.domains as domains
import designateclient.v1.records as records
import designateclient.v1.servers as servers
import zaza.model
import zaza.openstack.utilities.juju as zaza_juju
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.designate.utils as designate_utils
import zaza.charm_lifecycle.utils as lifecycle_utils
class BaseDesignateTest(test_utils.OpenStackBaseTest):
@@ -83,8 +89,8 @@ class BaseDesignateTest(test_utils.OpenStackBaseTest):
cls.server_delete = cls.designate.servers.delete
class DesignateTests(BaseDesignateTest):
"""Designate charm restart and pause tests."""
class DesignateAPITests(BaseDesignateTest):
"""Tests interact with designate api."""
TEST_DOMAIN = 'amuletexample.com.'
TEST_NS1_RECORD = 'ns1.{}'.format(TEST_DOMAIN)
@@ -92,33 +98,6 @@ class DesignateTests(BaseDesignateTest):
TEST_WWW_RECORD = "www.{}".format(TEST_DOMAIN)
TEST_RECORD = {TEST_WWW_RECORD: '10.0.0.23'}
def test_900_restart_on_config_change(self):
"""Checking restart happens on config change.
Change debug mode and assert that change propagates to the correct
file and that services are restarted as a result
"""
# Services which are expected to restart upon config change,
# and corresponding config files affected by the change
conf_file = '/etc/designate/designate.conf'
# Make config change, check for service restarts
self.restart_on_changed_debug_oslo_config_file(
conf_file,
self.designate_svcs,
)
def test_910_pause_and_resume(self):
"""Run pause and resume tests.
Pause service and check services are stopped then resume and check
they are started
"""
with self.pause_resume(
self.designate_svcs,
pgrep_full=False):
logging.info("Testing pause resume")
def _get_server_id(self, server_name=None, server_id=None):
for srv in self.server_list():
if isinstance(srv, dict):
@@ -245,3 +224,105 @@ class DesignateTests(BaseDesignateTest):
logging.debug('Tidy up delete test record')
self._wait_on_domain_gone(domain_id)
logging.debug('OK')
class DesignateCharmTests(BaseDesignateTest):
    """Designate charm restart and pause tests."""

    def test_900_restart_on_config_change(self):
        """Checking restart happens on config change.

        Change debug mode and assert that change propagates to the correct
        file and that services are restarted as a result
        """
        # The designate services should restart when the debug flag is
        # toggled in this configuration file.
        config_file = '/etc/designate/designate.conf'
        self.restart_on_changed_debug_oslo_config_file(
            config_file,
            self.designate_svcs,
        )

    def test_910_pause_and_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped then resume and check
        they are started
        """
        services = self.designate_svcs
        with self.pause_resume(services, pgrep_full=False):
            logging.info("Testing pause resume")
class DesignateTests(DesignateAPITests, DesignateCharmTests):
    """Collection of all Designate test classes."""
class DesignateBindExpand(BaseDesignateTest):
    """Test expanding and shrinking bind."""

    # DNS fixture used to verify records survive adding/removing bind units.
    TEST_DOMAIN = 'zazabindtesting.com.'
    TEST_NS1_RECORD = 'ns1.{}'.format(TEST_DOMAIN)
    TEST_NS2_RECORD = 'ns2.{}'.format(TEST_DOMAIN)
    TEST_WWW_RECORD = "www.{}".format(TEST_DOMAIN)
    TEST_RECORD = {TEST_WWW_RECORD: '10.0.0.24'}

    def test_expand_and_contract(self):
        """Test expanding and shrinking bind.

        Create a zone and an A record, verify it resolves, add a
        designate-bind unit and re-verify on every unit, then remove a
        unit and verify once more.
        """
        test_config = lifecycle_utils.get_charm_config(fatal=False)
        states = test_config.get("target_deploy_status", {})
        if not self.post_xenial_queens:
            raise unittest.SkipTest("Test not supported before Queens")
        # Idempotent setup: reuse the zone/recordset if they already exist.
        domain = designate_utils.create_or_return_zone(
            self.designate,
            name=self.TEST_DOMAIN,
            email="test@zaza.com")
        designate_utils.create_or_return_recordset(
            self.designate,
            domain['id'],
            'www',
            'A',
            [self.TEST_RECORD[self.TEST_WWW_RECORD]])
        # Test record is in bind and designate
        designate_utils.check_dns_entry(
            self.designate,
            self.TEST_RECORD[self.TEST_WWW_RECORD],
            self.TEST_DOMAIN,
            record_name=self.TEST_WWW_RECORD)
        logging.info('Adding a designate-bind unit')
        zaza.model.add_unit('designate-bind', wait_appear=True)
        zaza.model.block_until_all_units_idle()
        zaza.model.wait_for_application_states(states=states)
        logging.info('Performing DNS lookup on all units')
        # The record must now resolve on the new unit as well.
        designate_utils.check_dns_entry(
            self.designate,
            self.TEST_RECORD[self.TEST_WWW_RECORD],
            self.TEST_DOMAIN,
            record_name=self.TEST_WWW_RECORD)
        units = zaza.model.get_status().applications['designate-bind']['units']
        # Remove the lowest-numbered unit so the choice is deterministic.
        doomed_unit = sorted(units.keys())[0]
        logging.info('Removing {}'.format(doomed_unit))
        zaza.model.destroy_unit(
            'designate-bind',
            doomed_unit,
            wait_disappear=True)
        zaza.model.block_until_all_units_idle()
        zaza.model.wait_for_application_states(states=states)
        logging.info('Performing DNS lookup on all units')
        designate_utils.check_dns_entry(
            self.designate,
            self.TEST_RECORD[self.TEST_WWW_RECORD],
            self.TEST_DOMAIN,
            record_name=self.TEST_WWW_RECORD)
@@ -0,0 +1,205 @@
"""Utilities for interacting with designate."""
import dns.resolver
import logging
import tenacity
import designateclient.exceptions
import zaza.model
def create_or_return_zone(client, name, email):
    """Create zone or return matching existing zone.

    :param client: Client to query designate
    :type client: designateclient.v2.Client
    :param name: Name of zone
    :type name: str
    :param email: Email address to associate with zone.
    :type email: str
    :returns: Zone
    :rtype: designateclient.v2.zones.Zone
    """
    try:
        return client.zones.create(name=name, email=email)
    except designateclient.exceptions.Conflict:
        # Zone already present: look it up rather than creating it.
        logging.info('{} zone already exists.'.format(name))
        matches = [z for z in client.zones.list() if z['name'] == name]
        assert len(matches) == 1, "Wrong number of zones found {}".format(
            matches)
        return matches[0]
def create_or_return_recordset(client, zone_id, sub_domain, record_type, data):
    """Create recordset or return matching existing recordset.

    :param client: Client to query designate
    :type client: designateclient.v2.Client
    :param zone_id: uuid of zone
    :type zone_id: str
    :param sub_domain: Subdomain to associate records with
    :type sub_domain: str
    :param record_type: DNS record type, eg 'A'
    :type record_type: str
    :param data: List of record values eg ['10.0.0.24']
    :type data: list
    :returns: RecordSet
    :rtype: designateclient.v2.recordsets.RecordSet
    :raises: AssertionError if the recordset exists but cannot be found
    """
    try:
        rs = client.recordsets.create(
            zone_id,
            sub_domain,
            record_type,
            data)
    except designateclient.exceptions.Conflict:
        logging.info('{} record already exists.'.format(data))
        # Look up the pre-existing recordset.  Previously, if no entry
        # matched, the function raised UnboundLocalError on return; fail
        # with a clear assertion instead.
        rs = None
        for r in client.recordsets.list(zone_id):
            if r['name'].split('.')[0] == sub_domain:
                rs = r
        assert rs is not None, (
            "Existing recordset for {} not found in zone {}".format(
                sub_domain, zone_id))
    return rs
def get_designate_zone_objects(designate_client, domain_name=None,
                               domain_id=None):
    """Get all domains matching a given domain_name or domain_id.

    :param designate_client: Client to query designate
    :type designate_client: designateclient.v2.Client
    :param domain_name: Name of domain to lookup
    :type domain_name: str
    :param domain_id: UUID of domain to lookup
    :type domain_id: str
    :returns: List of Domain objects matching domain_name or domain_id
    :rtype: [designateclient.v2.domains.Domain,]
    """
    matches = []
    for zone in designate_client.zones.list():
        if zone['name'] == domain_name or zone['id'] == domain_id:
            matches.append(zone)
    return matches
def get_designate_domain_object(designate_client, domain_name):
    """Get the one and only domain matching the given domain_name.

    :param designate_client: Client to query designate
    :type designate_client: designateclient.v2.Client
    :param domain_name: Name of domain to lookup
    :type domain_name: str
    :returns: Domain with name domain_name
    :rtype: designateclient.v2.domains.Domain
    :raises: AssertionError
    """
    zones = get_designate_zone_objects(
        designate_client, domain_name=domain_name)
    # Exactly one match is required; zero or multiple is an error.
    assert len(zones) == 1, "Found {} domains for {}".format(
        len(zones), domain_name)
    return zones[0]
def get_designate_dns_records(designate_client, domain_name, ip):
    """Look for records in designate that match the given ip.

    :param designate_client: Client to query designate
    :type designate_client: designateclient.v2.Client
    :param domain_name: Name of domain to lookup
    :type domain_name: str
    :param ip: Record values to match (list of addresses)
    :type ip: list
    :returns: List of Record objects matching matching IP address
    :rtype: [designateclient.v2.records.Record,]
    """
    zone = get_designate_domain_object(designate_client, domain_name)
    recordsets = designate_client.recordsets.list(zone['id'])
    return [rec for rec in recordsets if rec['records'] == ip]
def check_dns_record_exists(dns_server_ip, query_name, expected_ip,
                            retry_count=3):
    """Lookup a DNS record against the given dns server address.

    :param dns_server_ip: IP address to run query against
    :type dns_server_ip: str
    :param query_name: Record to lookup
    :type query_name: str
    :param expected_ip: IP address expected to be associated with record.
    :type expected_ip: str
    :param retry_count: Number of times to retry query. Useful if waiting
                        for record to propagate.
    :type retry_count: int
    :raises: AssertionError
    """
    resolver = dns.resolver.Resolver()
    resolver.nameservers = [dns_server_ip]
    # Retry the lookup with exponential backoff to allow the record time
    # to propagate to the queried server.
    retryer = tenacity.Retrying(
        stop=tenacity.stop_after_attempt(retry_count),
        wait=tenacity.wait_exponential(multiplier=1, min=2, max=10),
        reraise=True)
    for attempt in retryer:
        with attempt:
            logging.info("Checking record {} against {}".format(
                query_name,
                dns_server_ip))
            answers = resolver.query(query_name)
    for rdata in answers:
        logging.info("Checking address returned by {} is correct".format(
            dns_server_ip))
        assert str(rdata) == expected_ip
def check_dns_entry(des_client, ip, domain, record_name):
    """Check that record for ip is in designate and in bind.

    :param des_client: Client to query designate
    :type des_client: designateclient.v2.Client
    :param ip: IP address to lookup
    :type ip: str
    :param domain: Domain to look for record in
    :type domain: str
    :param record_name: record name
    :type record_name: str
    """
    # Verify via the designate API first, then on every bind unit.
    check_dns_entry_in_designate(
        des_client, [ip], domain, record_name=record_name)
    check_dns_entry_in_bind(ip, record_name)
def check_dns_entry_in_designate(des_client, ip, domain, record_name=None):
    """Look for records in designate that match the given ip domain.

    :param des_client: Client to query designate
    :type des_client: designateclient.v2.Client
    :param ip: Record values to lookup in designate (list of addresses)
    :type ip: list
    :param domain: Name of domain to lookup
    :type domain: str
    :param record_name: Retrieved record should have this name
    :type record_name: str
    :raises: AssertionError
    """
    records = get_designate_dns_records(des_client, domain, ip)
    assert records, "Record not found for {} in designate".format(ip)
    logging.info('Found record in {} for {} in designate'.format(domain, ip))
    if record_name:
        named = [rec for rec in records if rec['name'] == record_name]
        assert named, "No DNS entry name matches expected name {}".format(
            record_name)
        logging.info('Found record in {} for {} in designate'.format(
            domain,
            record_name))
def check_dns_entry_in_bind(ip, record_name, model_name=None):
    """Check that record for ip address is in bind.

    :param ip: IP address to lookup
    :type ip: str
    :param record_name: record name
    :type record_name: str
    :param model_name: Name of model to query, defaults to current model
    :type model_name: str
    """
    # Every designate-bind unit must be able to resolve the record.
    bind_addrs = zaza.model.get_app_ips(
        'designate-bind', model_name=model_name)
    for addr in bind_addrs:
        logging.info("Checking {} is {} against ({})".format(
            record_name,
            ip,
            addr))
        check_dns_record_exists(addr, record_name, ip, retry_count=6)
@@ -18,6 +18,7 @@ import logging
import zaza.openstack.utilities.openstack as openstack_utils
CIRROS_IMAGE_NAME = "cirros"
CIRROS_ALT_IMAGE_NAME = "cirros_alt"
LTS_RELEASE = "bionic"
LTS_IMAGE_NAME = "bionic"
@@ -77,6 +78,18 @@ def add_cirros_image(glance_client=None, image_name=None):
image_name=image_name)
def add_cirros_alt_image(glance_client=None, image_name=None):
    """Add alt cirros image to the current deployment.

    :param glance_client: Authenticated glanceclient
    :type glance_client: glanceclient.Client
    :param image_name: Label for the image in glance
    :type image_name: str
    """
    # Fall back to the default alt-image label when none is supplied.
    add_cirros_image(glance_client, image_name or CIRROS_ALT_IMAGE_NAME)
def add_lts_image(glance_client=None, image_name=None, release=None):
"""Add an Ubuntu LTS image to the current deployment.
@@ -0,0 +1,40 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for configuring glance-simplestreams-sync."""
import logging
import zaza.model as zaza_model
import zaza.openstack.utilities.generic as generic_utils
def sync_images():
    """Run image sync using an action.

    Execute an initial image sync using an action to ensure that the
    cloud is populated with images at the right point in time during
    deployment.
    """
    logging.info("Synchronising images using glance-simplestreams-sync")
    action = zaza_model.run_action_on_leader(
        "glance-simplestreams-sync",
        "sync-images",
        raise_on_failure=True,
        action_params={},
    )
    # Fail loudly if the sync action did not complete successfully.
    generic_utils.assertActionRanOK(action)
@@ -24,7 +24,7 @@ import zaza.openstack.utilities.openstack as openstack_utils
@tenacity.retry(
retry=tenacity.retry_if_result(lambda images: len(images) < 3),
retry=tenacity.retry_if_result(lambda images: len(images) < 4),
wait=tenacity.wait_fixed(6), # interval between retries
stop=tenacity.stop_after_attempt(100)) # retry times
def retry_image_sync(glance_client):
@@ -42,7 +42,7 @@ def get_product_streams(url):
# There is a race between the images being available in glance and any
# metadata being written. Use tenacity to avoid this race.
client = requests.session()
json_data = client.get(url).text
json_data = client.get(url, verify=openstack_utils.get_cacert()).text
return json.loads(json_data)
@@ -61,7 +61,7 @@ class GlanceSimpleStreamsSyncTest(test_utils.OpenStackBaseTest):
cls.keystone_session)
def test_010_wait_for_image_sync(self):
"""Wait for images to be synced. Expect at least three."""
"""Wait for images to be synced. Expect at least four."""
self.assertTrue(retry_image_sync(self.glance_client))
def test_050_gss_permissions_regression_check_lp1611987(self):
@@ -94,31 +94,34 @@ class GlanceSimpleStreamsSyncTest(test_utils.OpenStackBaseTest):
'com.ubuntu.cloud:server:14.04:amd64',
'com.ubuntu.cloud:server:16.04:amd64',
'com.ubuntu.cloud:server:18.04:amd64',
'com.ubuntu.cloud:server:20.04:amd64',
]
uri = "streams/v1/auto.sync.json"
key = "url"
xenial_pike = openstack_utils.get_os_release('xenial_pike')
if openstack_utils.get_os_release() <= xenial_pike:
key = "publicURL"
catalog = self.keystone_client.service_catalog.get_endpoints()
ps_interface = catalog["product-streams"][0][key]
url = "{}/{}".format(ps_interface, uri)
# There is a race between the images being available in glance and the
# metadata being written for each image. Use tenacity to avoid this
# race and make the test idempotent.
@tenacity.retry(
retry=tenacity.retry_if_exception_type(AssertionError),
retry=tenacity.retry_if_exception_type(
(AssertionError, KeyError)
),
wait=tenacity.wait_fixed(10), reraise=True,
stop=tenacity.stop_after_attempt(10))
def _check_local_product_streams(url, expected_images):
stop=tenacity.stop_after_attempt(25))
def _check_local_product_streams(expected_images):
# Refresh from catalog as URL may change if swift in use.
ps_interface = self.keystone_client.service_catalog.url_for(
service_type='product-streams', interface='publicURL'
)
url = "{}/{}".format(ps_interface, uri)
logging.info('Retrieving product stream information'
' from {}'.format(url))
product_streams = get_product_streams(url)
logging.debug(product_streams)
images = product_streams["products"]
for image in expected_images:
self.assertIn(image, images)
_check_local_product_streams(url, expected_images)
_check_local_product_streams(expected_images)
logging.debug("Local product stream successful")
@@ -0,0 +1,69 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for Gnocchi tests."""
import logging
import zaza.model as model
import zaza.openstack.utilities.openstack as openstack_utils
def configure_s3_backend():
    """Inject S3 parameters from Swift for Gnocchi config.

    Look up the S3-compatible public endpoint from the Keystone catalog,
    create EC2-style credentials for the current user/project, point the
    gnocchi charm at them, then wait for the model to settle.

    :raises: AssertionError if no public S3 endpoint is in the catalog.
    """
    session = openstack_utils.get_overcloud_keystone_session()
    ks_client = openstack_utils.get_keystone_session_client(session)
    logging.info('Retrieving S3 connection data from Swift')
    token_data = ks_client.tokens.get_token_data(session.get_token())
    project_id = token_data['token']['project']['id']
    user_id = token_data['token']['user']['id']
    # Store URL to service providing S3 compatible API.  Guard against a
    # catalog with no public s3 endpoint, which previously led to an
    # UnboundLocalError below.
    s3_region = None
    s3_endpoint = None
    for entry in token_data['token']['catalog']:
        if entry['type'] == 's3':
            for endpoint in entry['endpoints']:
                if endpoint['interface'] == 'public':
                    s3_region = endpoint['region']
                    s3_endpoint = endpoint['url']
    assert s3_endpoint is not None, (
        'No public S3 endpoint found in the Keystone catalog')
    # Create AWS compatible application credentials in Keystone
    ec2_creds = ks_client.ec2.create(user_id, project_id)
    logging.info('Changing Gnocchi charm config to connect to S3')
    model.set_application_config(
        'gnocchi',
        {'s3-endpoint-url': s3_endpoint,
         's3-region-name': s3_region,
         's3-access-key-id': ec2_creds.access,
         's3-secret-access-key': ec2_creds.secret}
    )
    logging.info('Waiting for units to execute config-changed hook')
    model.wait_for_agent_status()
    logging.info('Waiting for units to reach target states')
    model.wait_for_application_states(
        states={
            'gnocchi': {
                # Fixed typo: the key previously read 'workload-status-',
                # which would never match the real status key.
                'workload-status': 'active',
                'workload-status-message': 'Unit is ready'
            },
            'ceilometer': {
                'workload-status': 'blocked',
                'workload-status-message': 'Run the ' +
                'ceilometer-upgrade action on the leader ' +
                'to initialize ceilometer and gnocchi'
            }
        }
    )
    model.block_until_all_units_idle()
+53 -1
View File
@@ -16,9 +16,11 @@
"""Encapsulate Gnocchi testing."""
import boto3
import logging
import pprint
from gnocchiclient.v1 import client as gnocchi_client
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
@@ -58,3 +60,53 @@ class GnocchiTest(test_utils.OpenStackBaseTest):
"""
with self.pause_resume(self.services):
logging.info("Testing pause and resume")
class GnocchiS3Test(test_utils.OpenStackBaseTest):
    """Test Gnocchi for S3 storage backend."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super(GnocchiS3Test, cls).setUpClass()
        session = openstack_utils.get_overcloud_keystone_session()
        ks_client = openstack_utils.get_keystone_session_client(session)
        # Get token data so we can glean our user_id and project_id
        token_data = ks_client.tokens.get_token_data(session.get_token())
        project_id = token_data['token']['project']['id']
        user_id = token_data['token']['user']['id']
        # Store URL to service providing S3 compatible API.
        # NOTE(review): assumes the catalog publishes a public s3
        # endpoint; otherwise s3_region/s3_endpoint stay unset and the
        # test errors later — confirm against the deployed bundle.
        for entry in token_data['token']['catalog']:
            if entry['type'] == 's3':
                for endpoint in entry['endpoints']:
                    if endpoint['interface'] == 'public':
                        cls.s3_region = endpoint['region']
                        cls.s3_endpoint = endpoint['url']
        # Create AWS compatible application credentials in Keystone
        cls.ec2_creds = ks_client.ec2.create(user_id, project_id)

    def test_s3_list_gnocchi_buckets(self):
        """Verify that the gnocchi buckets were created in the S3 backend."""
        kwargs = {
            'region_name': self.s3_region,
            'aws_access_key_id': self.ec2_creds.access,
            'aws_secret_access_key': self.ec2_creds.secret,
            'endpoint_url': self.s3_endpoint,
            'verify': self.cacert,
        }
        s3_client = boto3.client('s3', **kwargs)
        bucket_names = ['gnocchi-measure', 'gnocchi-aggregates']
        # Validate their presence
        bucket_list = s3_client.list_buckets()
        logging.info(pprint.pformat(bucket_list))
        # Check every expected gnocchi bucket is listed.  The original
        # loop checked the inverse (that every listed bucket was a
        # gnocchi one) and built an AssertionError without raising it,
        # so the test could never fail.
        listed = {bkt['Name'] for bkt in bucket_list['Buckets']}
        for gnocchi_bkt in bucket_names:
            assert gnocchi_bkt in listed, (
                'Bucket "{}" not found'.format(gnocchi_bkt))
+6 -70
View File
@@ -20,7 +20,6 @@ import logging
import os
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.juju as juju_utils
import zaza.openstack.configure.hacluster
@@ -35,78 +34,15 @@ class HaclusterTest(test_utils.OpenStackBaseTest):
def test_900_action_cleanup(self):
"""The services can be cleaned up."""
status = zaza.model.get_status().applications[self.application_name]
# libjuju juju status no longer has units for subordinate charms
# Use the application it is subordinate-to to check workload status
if status.get("units") is None and status.get("subordinate-to"):
primary_status = juju_utils.get_application_status(
status.get("subordinate-to")[0])
leader = None
for unit in primary_status["units"]:
if primary_status["units"][unit].get('leader'):
leader = unit
if primary_status["units"][leader].get("subordinates"):
for subordinate in primary_status["units"][leader]["subordinates"]:
# mysql-router is a subordinate from focal onwards
_app = subordinate.split('/')[0]
if _app != 'hacluster':
continue
logging.info("Cleaning {}".format(subordinate))
_action = "cleanup"
action_id = zaza.model.run_action(subordinate, "cleanup")
assert "success" in action_id.data["results"]["result"], (
"Set hacluster action {} failed: {}"
.format(_action, action_id.data))
logging.info("Cleaning action w/resource {}"
.format(subordinate))
params = {'resource': 'res_ks_haproxy'}
_action = "cleanup res_ks_haproxy"
zaza.model.run_action(subordinate, "cleanup",
action_params=params)
assert "success" in action_id.data["results"]["result"], (
"Set hacluster action {} failed: {}"
.format(_action, action_id.data))
zaza.model.run_action_on_leader(
self.application_name,
'cleanup',
raise_on_failure=True)
def test_910_pause_and_resume(self):
"""The services can be paused and resumed."""
logging.debug('Checking pause and resume actions...')
status = zaza.model.get_status().applications[self.application_name]
# libjuju juju status no longer has units for subordinate charms
# Use the application it is subordinate-to to check workload status
if status.get("units") is None and status.get("subordinate-to"):
primary_status = juju_utils.get_application_status(
status.get("subordinate-to")[0])
leader = None
for unit in primary_status["units"]:
if primary_status["units"][unit].get('leader'):
leader = unit
if primary_status["units"][leader].get("subordinates"):
for subordinate in primary_status["units"][leader]["subordinates"]:
# mysql-router is a subordinate from focal onwards
_app = subordinate.split('/')[0]
if _app != 'hacluster':
continue
logging.info("Pausing {}".format(subordinate))
zaza.model.run_action(subordinate, "pause")
zaza.model.block_until_unit_wl_status(
subordinate,
"maintenance")
logging.info("Resuming {}".format(subordinate))
zaza.model.run_action(subordinate, "resume")
zaza.model.block_until_unit_wl_status(subordinate, "active")
_states = {"hacluster": {
"workload-status": "active",
"workload-status-message": "Unit is ready and clustered"}}
zaza.model.wait_for_application_states(states=_states)
logging.debug('OK')
with self.pause_resume([]):
logging.info("Testing pause resume")
def _toggle_maintenance_and_wait(self, expected):
"""Configure cluster maintenance-mode.
@@ -26,6 +26,8 @@ DEMO_ADMIN_USER_PASSWORD = 'password'
DEMO_USER = 'demo'
DEMO_PASSWORD = 'password'
TEMPEST_ROLES = ['member', 'ResellerAdmin']
class BaseKeystoneTest(test_utils.OpenStackBaseTest):
"""Base for Keystone charm tests."""
@@ -14,6 +14,8 @@
"""Code for setting up keystone."""
import keystoneauth1
import zaza.openstack.utilities.openstack as openstack_utils
from zaza.openstack.charm_tests.keystone import (
BaseKeystoneTest,
@@ -24,6 +26,7 @@ from zaza.openstack.charm_tests.keystone import (
DEMO_ADMIN_USER_PASSWORD,
DEMO_USER,
DEMO_PASSWORD,
TEMPEST_ROLES,
)
@@ -115,3 +118,30 @@ def add_demo_user():
else:
# create only V3 user
_v3()
def _add_additional_roles(roles):
    """Add additional roles to this deployment.

    Roles that already exist (keystone returns HTTP 409 Conflict) are
    skipped, so this is safe to call repeatedly.

    :param roles: Names of the roles to create
    :type roles: list
    :returns: None
    :rtype: None
    """
    keystone_session = openstack_utils.get_overcloud_keystone_session()
    keystone_client = openstack_utils.get_keystone_session_client(
        keystone_session)
    for role_name in roles:
        try:
            keystone_client.roles.create(role_name)
        except keystoneauth1.exceptions.http.Conflict:
            # Role already exists; idempotent no-op.
            pass
def add_tempest_roles():
    """Add tempest roles to this deployment.

    Creates the roles listed in TEMPEST_ROLES, skipping any that
    already exist.

    :returns: None
    :rtype: None
    """
    _add_additional_roles(TEMPEST_ROLES)
@@ -74,6 +74,15 @@ packages:
fip_1 = neutron_tests.floating_ips_from_instance(instance_1)[0]
fip_2 = neutron_tests.floating_ips_from_instance(instance_2)[0]
# Wait for the created share to become available before it gets used.
openstack_utils.resource_reaches_status(
self.manila_client.shares,
share.id,
wait_iteration_max_time=120,
stop_after_attempt=2,
expected_status="available",
msg="Waiting for a share to become available")
share.allow(access_type='ip', access=fip_1, access_level='rw')
share.allow(access_type='ip', access=fip_2, access_level='rw')
@@ -125,6 +134,5 @@ packages:
openstack_utils.ssh_command(
username, fip_2, 'instance-2',
'sudo cat /mnt/ceph/test'.format(
mount_path),
'sudo cat /mnt/ceph/test',
password=password, privkey=privkey, verify=verify)
+9 -4
View File
@@ -38,7 +38,8 @@ class MasakariTest(test_utils.OpenStackBaseTest):
@classmethod
def setUpClass(cls):
"""Run class setup for running tests."""
super(MasakariTest, cls).setUpClass()
super(MasakariTest, cls).setUpClass(application_name="masakari")
cls.current_release = openstack_utils.get_os_release()
cls.keystone_session = openstack_utils.get_overcloud_keystone_session()
cls.model_name = zaza.model.get_juju_model()
cls.nova_client = openstack_utils.get_nova_session_client(
@@ -169,8 +170,12 @@ class MasakariTest(test_utils.OpenStackBaseTest):
zaza.openstack.configure.masakari.enable_hosts()
def test_instance_restart_on_fail(self):
"""Test singlee guest crash and recovery."""
raise unittest.SkipTest("Bug #1866638")
"""Test single guest crash and recovery."""
if self.current_release < openstack_utils.get_os_release(
'bionic_ussuri'):
raise unittest.SkipTest(
"Not supported on {}. Bug #1866638".format(
self.current_release))
vm_name = 'zaza-test-instance-failover'
vm = self.ensure_guest(vm_name)
_, unit_name = self.get_guests_compute_info(vm_name)
@@ -198,6 +203,6 @@ class MasakariTest(test_utils.OpenStackBaseTest):
unit_name,
vm.id,
model_name=self.model_name)
logging.info('{} pid is now {}'.format(vm_name, guest_pid))
logging.info('{} pid is now {}'.format(vm_name, new_guest_pid))
assert new_guest_pid and new_guest_pid != guest_pid, (
"Restart failed or never happened")
+196 -37
View File
@@ -71,10 +71,73 @@ class MySQLBaseTest(test_utils.OpenStackBaseTest):
self.non_leaders.append(unit)
return self.leader, self.non_leaders
def get_cluster_status(self):
    """Get cluster status.

    Return cluster status dict from the cluster-status action or raise
    assertion error.

    :returns: Dictionary of cluster status
    :rtype: dict
    :raises: AssertionError when the action returned no usable results
    """
    logging.info("Running cluster-status action")
    action = zaza.model.run_action_on_leader(
        self.application,
        "cluster-status",
        action_params={})
    assert action.data.get("results") is not None, (
        "Cluster status action failed: No results: {}"
        .format(action.data))
    assert action.data["results"].get("cluster-status") is not None, (
        "Cluster status action failed: No cluster-status: {}"
        .format(action.data))
    # The action returns the status as a JSON-encoded string.
    return json.loads(action.data["results"]["cluster-status"])
def get_rw_primary_node(self):
    """Get RW primary node.

    Return RW primary node unit.

    :returns: Unit object of primary node, or None when no unit matches
    :rtype: Union[Unit, None]
    """
    _status = self.get_cluster_status()
    _primary_ip = _status['groupInformationSourceMember']
    # The primary is reported as "<address>:<port>"; strip the port.
    # NOTE(review): this split assumes an IPv4 address; an IPv6
    # "[addr]:port" value would be truncated — confirm if IPv6 is in play.
    if ":" in _primary_ip:
        _primary_ip = _primary_ip.split(':')[0]
    units = zaza.model.get_units(self.application_name)
    for unit in units:
        # Compare for equality: a substring test would wrongly match,
        # e.g. primary "10.0.0.1" against a unit at "10.0.0.11".
        if unit.public_address == _primary_ip:
            return unit
class MySQLCommonTests(MySQLBaseTest):
"""Common mysql charm tests."""
def test_110_mysqldump(self):
    """Backup mysql.

    Run the mysqldump action.

    :raises: AssertionError when the dump file name is missing from the
             action results
    """
    _db = "keystone"
    _file_key = "mysqldump-file"
    logging.info("Execute mysqldump action")
    # Need to change strict mode to be able to dump database
    if self.application_name == "percona-cluster":
        action = zaza.model.run_action_on_leader(
            self.application_name,
            "set-pxc-strict-mode",
            action_params={"mode": "MASTER"})
    action = zaza.model.run_action_on_leader(
        self.application,
        "mysqldump",
        action_params={"databases": _db})
    _results = action.data["results"]
    # The reported dump file name embeds the dumped database name.
    assert _db in _results[_file_key], (
        "Mysqldump action failed: {}".format(action.data))
    logging.info("Passed mysqldump action test.")
def test_910_restart_on_config_change(self):
"""Checking restart happens on config change.
@@ -266,10 +329,9 @@ class PerconaClusterColdStartTest(PerconaClusterBaseTest):
After bootstrapping a non-leader node, notify bootstrapped on the
leader node.
"""
_machines = list(
_machines = sorted(
juju_utils.get_machine_uuids_for_application(self.application))
# Stop Nodes
_machines.sort()
# Avoid hitting an update-status hook
logging.debug("Wait till model is idle ...")
zaza.model.block_until_all_units_idle()
@@ -412,33 +474,12 @@ class MySQLInnoDBClusterTests(MySQLCommonTests):
Run the cluster-status action.
"""
logging.info("Execute cluster-status action")
action = zaza.model.run_action_on_leader(
self.application,
"cluster-status",
action_params={})
cluster_status = json.loads(action.data["results"]["cluster-status"])
cluster_status = self.get_cluster_status()
assert "OK" in cluster_status["defaultReplicaSet"]["status"], (
"Cluster status action failed: {}"
.format(action.data))
"Cluster status is not OK: {}"
.format(cluster_status))
logging.info("Passed cluster-status action test.")
def test_110_mysqldump(self):
"""Backup mysql.
Run the mysqldump action.
"""
_db = "keystone"
_file_key = "mysqldump-file"
logging.info("Execute mysqldump action")
action = zaza.model.run_action_on_leader(
self.application,
"mysqldump",
action_params={"databases": _db})
_results = action.data["results"]
assert _db in _results[_file_key], (
"Mysqldump action failed: {}".format(action.data))
logging.info("Passed mysqldump action test.")
def test_120_set_cluster_option(self):
"""Set cluster option.
@@ -487,10 +528,9 @@ class MySQLInnoDBClusterColdStartTest(MySQLBaseTest):
After a cold start, reboot cluster from complete outage.
"""
_machines = list(
_machines = sorted(
juju_utils.get_machine_uuids_for_application(self.application))
# Stop Nodes
_machines.sort()
# Avoid hitting an update-status hook
logging.debug("Wait till model is idle ...")
zaza.model.block_until_all_units_idle()
@@ -523,25 +563,40 @@ class MySQLInnoDBClusterColdStartTest(MySQLBaseTest):
self.resolve_update_status_errors()
zaza.model.block_until_all_units_idle()
logging.debug("Wait for application states ...")
logging.debug("Clear error hooks after reboot ...")
for unit in zaza.model.get_units(self.application):
try:
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
except zaza.model.UnitError:
self.resolve_update_status_errors()
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
states = {self.application: {
"workload-status": "blocked",
"workload-status-message":
"MySQL InnoDB Cluster not healthy: None"}}
logging.debug("Wait for application states blocked ...")
states = {
self.application: {
"workload-status": "blocked",
"workload-status-message":
"MySQL InnoDB Cluster not healthy: None"},
"mysql-router": {
"workload-status": "blocked",
"workload-status-message":
"Failed to connect to MySQL"}}
zaza.model.wait_for_application_states(states=states)
logging.info("Execute reboot-cluster-from-complete-outage "
"action after cold boot ...")
action = zaza.model.run_action_on_leader(
self.application,
"reboot-cluster-from-complete-outage",
action_params={})
# We do not know which unit has the most up to date data
# run reboot-cluster-from-complete-outage until we get a success.
for unit in zaza.model.get_units(self.application):
action = zaza.model.run_action(
unit.entity_id,
"reboot-cluster-from-complete-outage",
action_params={})
if "Success" in action.data["results"].get("outcome"):
break
else:
logging.info(action.data["results"].get("output"))
assert "Success" in action.data["results"]["outcome"], (
"Reboot cluster from complete outage action failed: {}"
.format(action.data))
@@ -670,3 +725,107 @@ class MySQL8MigrationTests(MySQLBaseTest):
test_config = lifecycle_utils.get_charm_config(fatal=False)
zaza.model.wait_for_application_states(
states=test_config.get("target_deploy_status", {}))
class MySQLInnoDBClusterScaleTest(MySQLBaseTest):
    """MySQL InnoDB Cluster scale in/out tests.

    Tests run in order: remove the leader (3 -> 2 nodes), add a unit back
    (2 -> 3), add a fourth (3 -> 4), then remove one again (4 -> 3).
    """

    @classmethod
    def setUpClass(cls):
        """Run class setup for running mysql-innodb-cluster scale tests."""
        super().setUpClass()
        cls.application = "mysql-innodb-cluster"
        cls.test_config = lifecycle_utils.get_charm_config(fatal=False)
        # Expected workload status for a healthy deployment.
        cls.states = cls.test_config.get("target_deploy_status", {})

    def test_800_remove_leader(self):
        """Remove leader node.

        We start with a three node cluster, remove one, down to two.
        The cluster will be in waiting state.
        """
        logging.info("Scale in test: remove leader")
        leader, nons = self.get_leaders_and_non_leaders()
        # Resolve the unit object now: its address is needed for the
        # remove-instance action after the Juju unit itself is gone.
        leader_unit = zaza.model.get_unit_from_name(leader)
        zaza.model.destroy_unit(self.application_name, leader)
        logging.info("Wait until unit is in waiting state ...")
        zaza.model.block_until_unit_wl_status(nons[0], "waiting")
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        # Destroying the Juju unit does not evict the instance from the
        # cluster metadata; remove it explicitly via a surviving unit.
        logging.info(
            "Removing old unit from cluster: {} "
            .format(leader_unit.public_address))
        action = zaza.model.run_action(
            nons[0],
            "remove-instance",
            action_params={
                "address": leader_unit.public_address,
                "force": True})
        assert action.data.get("results") is not None, (
            "Remove instance action failed: No results: {}"
            .format(action.data))

    def test_801_add_unit(self):
        """Add mysql-innodb-cluster node.

        We start with two node cluster in waiting, add one, back to a full
        cluster of three.
        """
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Adding unit after removed unit ...")
        zaza.model.add_unit(self.application_name)
        logging.info("Wait for application states ...")
        zaza.model.wait_for_application_states(states=self.states)

    def test_802_add_unit(self):
        """Add another mysql-innodb-cluster node.

        We start with a three node full cluster, add another, up to a four
        node cluster.
        """
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Adding unit after full cluster ...")
        zaza.model.add_unit(self.application_name)
        logging.info("Wait for application states ...")
        zaza.model.wait_for_application_states(states=self.states)

    def test_803_remove_fourth(self):
        """Remove mysql-innodb-cluster node.

        We start with a four node full cluster, remove one, down to a three
        node full cluster.
        """
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        leader, nons = self.get_leaders_and_non_leaders()
        # Keep the unit object so its address survives the destroy below.
        non_leader_unit = zaza.model.get_unit_from_name(nons[0])
        zaza.model.destroy_unit(self.application_name, nons[0])
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Scale in test: back down to three")
        zaza.model.wait_for_application_states(states=self.states)
        # As in test_800, evict the removed instance from cluster metadata.
        logging.info(
            "Removing old unit from cluster: {} "
            .format(non_leader_unit.public_address))
        action = zaza.model.run_action(
            leader,
            "remove-instance",
            action_params={
                "address": non_leader_unit.public_address,
                "force": True})
        assert action.data.get("results") is not None, (
            "Remove instance action failed: No results: {}"
            .format(action.data))
+153 -62
View File
@@ -26,7 +26,6 @@ import tenacity
import unittest
import zaza
import zaza.openstack.charm_tests.glance.setup as glance_setup
import zaza.openstack.charm_tests.nova.utils as nova_utils
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.configure.guest as guest
@@ -113,6 +112,10 @@ class NeutronGatewayTest(NeutronPluginApiSharedTests):
super(NeutronGatewayTest, cls).setUpClass(cls)
cls.services = cls._get_services()
# set up clients
cls.neutron_client = (
openstack_utils.get_neutron_session_client(cls.keystone_session))
_APP_NAME = 'neutron-gateway'
def test_401_enable_qos(self):
@@ -128,7 +131,8 @@ class NeutronGatewayTest(NeutronPluginApiSharedTests):
self._validate_openvswitch_agent_qos()
@tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60))
@tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60),
reraise=True, stop=tenacity.stop_after_attempt(8))
def _validate_openvswitch_agent_qos(self):
"""Validate that the qos extension is enabled in the ovs agent."""
# obtain the dhcp agent to identify the neutron-gateway host
@@ -281,36 +285,44 @@ class NeutronCreateNetworkTest(test_utils.OpenStackBaseTest):
# set up clients
cls.neutron_client = (
openstack_utils.get_neutron_session_client(cls.keystone_session))
cls.neutron_client.format = 'json'
_TEST_NET_NAME = 'test_net'
def test_400_create_network(self):
"""Create a network, verify that it exists, and then delete it."""
self._assert_test_network_doesnt_exist()
self._create_test_network()
net_id = self._assert_test_network_exists_and_return_id()
self._delete_test_network(net_id)
self._assert_test_network_doesnt_exist()
def _create_test_network(self):
logging.debug('Creating neutron network...')
self.neutron_client.format = 'json'
net_name = 'test_net'
# Verify that the network doesn't exist
networks = self.neutron_client.list_networks(name=net_name)
net_count = len(networks['networks'])
assert net_count == 0, (
"Expected zero networks, found {}".format(net_count))
# Create a network and verify that it exists
network = {'name': net_name}
network = {'name': self._TEST_NET_NAME}
self.neutron_client.create_network({'network': network})
networks = self.neutron_client.list_networks(name=net_name)
def _delete_test_network(self, net_id):
logging.debug('Deleting neutron network...')
self.neutron_client.delete_network(net_id)
def _assert_test_network_exists_and_return_id(self):
logging.debug('Confirming new neutron network...')
networks = self.neutron_client.list_networks(name=self._TEST_NET_NAME)
logging.debug('Networks: {}'.format(networks))
net_len = len(networks['networks'])
assert net_len == 1, (
"Expected 1 network, found {}".format(net_len))
logging.debug('Confirming new neutron network...')
network = networks['networks'][0]
assert network['name'] == net_name, "network ext_net not found"
assert network['name'] == self._TEST_NET_NAME, \
"network {} not found".format(self._TEST_NET_NAME)
return network['id']
# Cleanup
logging.debug('Deleting neutron network...')
self.neutron_client.delete_network(network['id'])
def _assert_test_network_doesnt_exist(self):
networks = self.neutron_client.list_networks(name=self._TEST_NET_NAME)
net_count = len(networks['networks'])
assert net_count == 0, (
"Expected zero networks, found {}".format(net_count))
class NeutronApiTest(NeutronCreateNetworkTest):
@@ -434,10 +446,10 @@ class NeutronOpenvSwitchTest(NeutronPluginApiSharedTests):
def test_101_neutron_sriov_config(self):
"""Verify data in the sriov agent config file."""
trusty_kilo = openstack_utils.get_os_release('trusty_kilo')
if self.current_os_release < trusty_kilo:
xenial_mitaka = openstack_utils.get_os_release('xenial_mitaka')
if self.current_os_release < xenial_mitaka:
logging.debug('Skipping test, sriov agent not supported on < '
'trusty/kilo')
'xenial/mitaka')
return
zaza.model.set_application_config(
@@ -589,18 +601,17 @@ class NeutronOpenvSwitchTest(NeutronPluginApiSharedTests):
logging.info('Testing pause resume')
class NeutronNetworkingTest(unittest.TestCase):
"""Ensure that openstack instances have valid networking."""
class NeutronNetworkingBase(test_utils.OpenStackBaseTest):
"""Base for checking openstack instances have valid networking."""
RESOURCE_PREFIX = 'zaza-neutrontests'
@classmethod
def setUpClass(cls):
"""Run class setup for running Neutron API Networking tests."""
cls.keystone_session = (
openstack_utils.get_overcloud_keystone_session())
cls.nova_client = (
openstack_utils.get_nova_session_client(cls.keystone_session))
super(NeutronNetworkingBase, cls).setUpClass()
cls.neutron_client = (
openstack_utils.get_neutron_session_client(cls.keystone_session))
# NOTE(fnordahl): in the event of a test failure we do not want to run
# tear down code as it will make debugging a problem virtually
# impossible. To alleviate each test method will set the
@@ -620,38 +631,6 @@ class NeutronNetworkingTest(unittest.TestCase):
server.id,
msg="server")
def test_instances_have_networking(self):
"""Validate North/South and East/West networking."""
guest.launch_instance(
glance_setup.LTS_IMAGE_NAME,
vm_name='{}-ins-1'.format(self.RESOURCE_PREFIX))
guest.launch_instance(
glance_setup.LTS_IMAGE_NAME,
vm_name='{}-ins-2'.format(self.RESOURCE_PREFIX))
instance_1 = self.nova_client.servers.find(
name='{}-ins-1'.format(self.RESOURCE_PREFIX))
instance_2 = self.nova_client.servers.find(
name='{}-ins-2'.format(self.RESOURCE_PREFIX))
def verify(stdin, stdout, stderr):
"""Validate that the SSH command exited 0."""
self.assertEqual(stdout.channel.recv_exit_status(), 0)
# Verify network from 1 to 2
self.validate_instance_can_reach_other(instance_1, instance_2, verify)
# Verify network from 2 to 1
self.validate_instance_can_reach_other(instance_2, instance_1, verify)
# Validate tenant to external network routing
self.validate_instance_can_reach_router(instance_1, verify)
self.validate_instance_can_reach_router(instance_2, verify)
# If we get here, it means the tests passed
self.run_tearDown = True
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
reraise=True, stop=tenacity.stop_after_attempt(8))
def validate_instance_can_reach_other(self,
@@ -708,7 +687,61 @@ class NeutronNetworkingTest(unittest.TestCase):
openstack_utils.ssh_command(
username, address, 'instance', 'ping -c 1 192.168.0.1',
password=password, privkey=privkey, verify=verify)
pass
@tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60),
                reraise=True, stop=tenacity.stop_after_attempt(8),
                retry=tenacity.retry_if_exception_type(AssertionError))
def check_server_state(self, nova_client, state, server_id=None,
                       server_name=None):
    """Wait for server to reach desired state.

    Retried (up to 8 attempts with exponential backoff) while the
    server has not yet reached the target state.

    :param nova_client: Nova client to use when checking status
    :type nova_client: nova client
    :param state: Target state for server
    :type state: str
    :param server_id: UUID of server to check
    :type server_id: str
    :param server_name: Name of server to check
    :type server_name: str
    :raises: AssertionError
    """
    # A name lookup takes precedence over a supplied server_id.
    if server_name:
        server_id = nova_client.servers.find(name=server_name).id
    server = nova_client.servers.find(id=server_id)
    assert server.status == state
@tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60),
                reraise=True, stop=tenacity.stop_after_attempt(8),
                retry=tenacity.retry_if_exception_type(AssertionError))
def check_neutron_agent_up(self, neutron_client, host_name):
    """Wait for agents to come up.

    Retried (up to 8 attempts with exponential backoff) until every
    agent on the given host reports admin-up and alive.

    :param neutron_client: Neutron client to use when checking status
    :type neutron_client: neutron client
    :param host_name: The name of the host whose agents need checking
    :type host_name: str
    :raises: AssertionError
    """
    for agent in neutron_client.list_agents()['agents']:
        if agent['host'] == host_name:
            assert agent['admin_state_up']
            assert agent['alive']
def check_connectivity(self, instance_1, instance_2):
    """Run North/South and East/West connectivity tests.

    :param instance_1: First guest instance to check
    :param instance_2: Second guest instance to check
    :raises: AssertionError when any SSH command exits non-zero
    """
    def verify(stdin, stdout, stderr):
        """Validate that the SSH command exited 0."""
        self.assertEqual(stdout.channel.recv_exit_status(), 0)

    # Verify network from 1 to 2
    self.validate_instance_can_reach_other(instance_1, instance_2, verify)
    # Verify network from 2 to 1
    self.validate_instance_can_reach_other(instance_2, instance_1, verify)

    # Validate tenant to external network routing
    self.validate_instance_can_reach_router(instance_1, verify)
    self.validate_instance_can_reach_router(instance_2, verify)
def floating_ips_from_instance(instance):
@@ -756,3 +789,61 @@ def ips_from_instance(instance, ip_type):
return list([
ip['addr'] for ip in instance.addresses['private']
if ip['OS-EXT-IPS:type'] == ip_type])
class NeutronNetworkingTest(NeutronNetworkingBase):
    """Ensure that openstack instances have valid networking."""

    def test_instances_have_networking(self):
        """Validate North/South and East/West networking."""
        self.launch_guests()
        instance_1, instance_2 = self.retrieve_guests()
        self.check_connectivity(instance_1, instance_2)
        # Only tear down when the checks passed, so a failed run leaves
        # the instances in place for debugging.
        self.run_tearDown = True
class NeutronNetworkingVRRPTests(NeutronNetworkingBase):
    """Check networking when gateways are restarted."""

    def test_gateway_failure(self):
        """Validate networking in the case of a gateway failure.

        Shuts down each gateway host in turn (via the undercloud) and
        verifies guest connectivity while the gateway is down and again
        after it is restarted.
        """
        instance_1, instance_2 = self.retrieve_guests()
        if not all([instance_1, instance_2]):
            # Guests were not left over from a previous test; create them.
            self.launch_guests()
            instance_1, instance_2 = self.retrieve_guests()
        self.check_connectivity(instance_1, instance_2)

        routers = self.neutron_client.list_routers(
            name='provider-router')['routers']
        assert len(routers) == 1, "Unexpected router count {}".format(
            len(routers))
        provider_router = routers[0]
        l3_agents = self.neutron_client.list_l3_agent_hosting_routers(
            router=provider_router['id'])['agents']
        logging.info(
            'Checking there are multiple L3 agents running tenant router')
        # HA routing requires two agents so that one gateway can fail.
        assert len(l3_agents) == 2, "Unexpected l3 agent count {}".format(
            len(l3_agents))
        # The gateway hosts are machines in the undercloud, so use
        # undercloud clients to stop/start and observe them.
        uc_ks_session = openstack_utils.get_undercloud_keystone_session()
        uc_nova_client = openstack_utils.get_nova_session_client(uc_ks_session)
        uc_neutron_client = openstack_utils.get_neutron_session_client(
            uc_ks_session)
        for agent in l3_agents:
            gateway_hostname = agent['host']
            gateway_server = uc_nova_client.servers.find(name=gateway_hostname)
            logging.info("Shutting down {}".format(gateway_hostname))
            gateway_server.stop()
            self.check_server_state(
                uc_nova_client,
                'SHUTOFF',
                server_name=gateway_hostname)
            # Traffic must still flow via the surviving gateway.
            self.check_connectivity(instance_1, instance_2)
            gateway_server.start()
            self.check_server_state(
                uc_nova_client,
                'ACTIVE',
                server_name=gateway_hostname)
            self.check_neutron_agent_up(
                uc_neutron_client,
                gateway_hostname)
            self.check_connectivity(instance_1, instance_2)
@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing neutron-api-plugin-arista."""
@@ -0,0 +1,79 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for setting up neutron-api-plugin-arista."""
import logging
import os
import tenacity
import zaza
import zaza.openstack.charm_tests.neutron_arista.utils as arista_utils
import zaza.openstack.utilities.openstack as openstack_utils
def download_arista_image():
    """Download arista-cvx-virt-test.qcow2 from a web server.

    The download will happen only if the env var TEST_ARISTA_IMAGE_REMOTE has
    been set, so you don't have to set it if you already have the image
    locally.

    If the env var TEST_ARISTA_IMAGE_LOCAL isn't set, it will be set to
    `/tmp/arista-cvx-virt-test.qcow2`. This is where the image will be
    downloaded to if TEST_ARISTA_IMAGE_REMOTE has been set.

    :returns: None
    :rtype: None
    """
    # Default the local path when the variable is unset or empty so that
    # later steps can always rely on TEST_ARISTA_IMAGE_LOCAL being usable.
    if not os.environ.get('TEST_ARISTA_IMAGE_LOCAL'):
        os.environ['TEST_ARISTA_IMAGE_LOCAL'] \
            = '/tmp/arista-cvx-virt-test.qcow2'

    # An unset or empty TEST_ARISTA_IMAGE_REMOTE means the image is already
    # available at TEST_ARISTA_IMAGE_LOCAL, so no download is attempted.
    remote = os.environ.get('TEST_ARISTA_IMAGE_REMOTE')
    if remote:
        logging.info('Downloading Arista image from {}'.format(remote))
        openstack_utils.download_image(
            remote,
            os.environ['TEST_ARISTA_IMAGE_LOCAL'])

    logging.info('Arista image can be found at {}'
                 .format(os.environ['TEST_ARISTA_IMAGE_LOCAL']))
def test_fixture():
    """Pass arista-virt-test-fixture's IP address to Neutron.

    Configures the Arista plugin charm with the fixture's eAPI endpoint,
    waits for the model to settle, then polls until the fixture answers
    network queries.

    :returns: None
    :rtype: None
    """
    fixture_ip_addr = arista_utils.fixture_ip_addr()
    logging.info(
        "{}'s IP address is '{}'. Passing it to {}..."
        .format(arista_utils.FIXTURE_APP_NAME, fixture_ip_addr,
                arista_utils.PLUGIN_APP_NAME))
    zaza.model.set_application_config(arista_utils.PLUGIN_APP_NAME,
                                      {'eapi-host': fixture_ip_addr})

    logging.info('Waiting for {} to become ready...'.format(
        arista_utils.PLUGIN_APP_NAME))
    zaza.model.wait_for_agent_status()
    zaza.model.wait_for_application_states()
    # The fixture may take a while before answering eAPI queries;
    # retry for up to ~5 minutes (30 attempts, 10 s apart).
    for attempt in tenacity.Retrying(
            wait=tenacity.wait_fixed(10),  # seconds
            stop=tenacity.stop_after_attempt(30),
            reraise=True):
        with attempt:
            arista_utils.query_fixture_networks(fixture_ip_addr)
@@ -0,0 +1,53 @@
#!/usr/bin/env python3
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulating `neutron-api-plugin-arista` testing."""
import logging
import tenacity
import zaza.openstack.charm_tests.neutron.tests as neutron_tests
import zaza.openstack.charm_tests.neutron_arista.utils as arista_utils
class NeutronCreateAristaNetworkTest(neutron_tests.NeutronCreateNetworkTest):
    """Test creating an Arista Neutron network through the API."""

    @classmethod
    def setUpClass(cls):
        """Run base setup, then wait until Neutron answers API calls."""
        super(NeutronCreateAristaNetworkTest, cls).setUpClass()
        logging.info('Waiting for Neutron to become ready...')
        # Poll up to 12 times, 5 seconds apart, re-raising the last error.
        retryer = tenacity.Retrying(
            wait=tenacity.wait_fixed(5),  # seconds
            stop=tenacity.stop_after_attempt(12),
            reraise=True)
        retryer(cls.neutron_client.list_networks)

    def _assert_test_network_exists_and_return_id(self):
        """Check the fixture sees exactly the test network, then defer."""
        fixture_networks = arista_utils.query_fixture_networks(
            arista_utils.fixture_ip_addr())
        self.assertEqual(fixture_networks, [self._TEST_NET_NAME])
        return super(NeutronCreateAristaNetworkTest,
                     self)._assert_test_network_exists_and_return_id()

    def _assert_test_network_doesnt_exist(self):
        """Check the fixture sees no networks, then defer to the parent."""
        fixture_networks = arista_utils.query_fixture_networks(
            arista_utils.fixture_ip_addr())
        self.assertEqual(fixture_networks, [])
        super(NeutronCreateAristaNetworkTest,
              self)._assert_test_network_doesnt_exist()
@@ -0,0 +1,68 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common Arista-related utils."""
import json
import requests
import urllib3
import zaza
FIXTURE_APP_NAME = 'arista-virt-test-fixture'
PLUGIN_APP_NAME = 'neutron-api-plugin-arista'
def fixture_ip_addr():
    """Return the public IP address of the Arista test fixture.

    :returns: Public address of the first arista-virt-test-fixture unit
    :rtype: str
    """
    return zaza.model.get_units(FIXTURE_APP_NAME)[0].public_address
_FIXTURE_LOGIN = 'admin'
_FIXTURE_PASSWORD = 'password123'
def query_fixture_networks(ip_addr):
    """Query the Arista test fixture's list of networks.

    :param ip_addr: Address of the fixture's eAPI endpoint
    :type ip_addr: str
    :returns: Names of the OpenStack networks known to the fixture
    :rtype: list
    :raises: requests.exceptions.HTTPError if the eAPI call fails
    """
    # The fixture serves a self-signed certificate; silence the warning
    # that would otherwise be emitted for every unverified request.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    session = requests.Session()
    session.headers['Content-Type'] = 'application/json'
    session.headers['Accept'] = 'application/json'
    session.verify = False
    session.auth = (_FIXTURE_LOGIN, _FIXTURE_PASSWORD)

    # Arista eAPI speaks JSON-RPC over HTTPS.
    data = {
        'id': 'Zaza {} tests'.format(PLUGIN_APP_NAME),
        'method': 'runCmds',
        'jsonrpc': '2.0',
        'params': {
            'timestamps': False,
            'format': 'json',
            'version': 1,
            'cmds': ['show openstack networks']
        }
    }
    response = session.post(
        'https://{}/command-api/'.format(ip_addr),
        data=json.dumps(data),
        timeout=10  # seconds
    )
    # Fail loudly on HTTP errors (e.g. bad credentials) instead of raising
    # a confusing KeyError below when the payload isn't a JSON-RPC result.
    response.raise_for_status()

    result = []
    for region in response.json()['result'][0]['regions'].values():
        for tenant in region['tenants'].values():
            for network in tenant['tenantNetworks'].values():
                result.append(network['networkName'])
    return result
+10
View File
@@ -35,5 +35,15 @@ FLAVORS = {
'ram': 8192,
'disk': 40,
'vcpus': 4},
'm1.tempest': {
'flavorid': 6,
'ram': 256,
'disk': 1,
'vcpus': 1},
'm2.tempest': {
'flavorid': 7,
'ram': 512,
'disk': 1,
'vcpus': 1},
}
KEYPAIR_NAME = 'zaza'
@@ -26,6 +26,12 @@ import zaza.openstack.utilities.openstack as openstack
import zaza.openstack.configure.guest
def ensure_lts_images():
"""Ensure that bionic and focal images are available for the tests."""
glance_setup.add_lts_image(image_name='bionic', release='bionic')
glance_setup.add_lts_image(image_name='focal', release='focal')
def add_amphora_image(image_url=None):
"""Add Octavia ``amphora`` test image to glance.
@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing OVN."""
+73
View File
@@ -0,0 +1,73 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate OVN testing."""
import logging
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
class BaseCharmOperationTest(test_utils.BaseCharmTest):
    """Base OVN Charm operation tests."""

    # override if not possible to determine release pair from charm under test
    release_application = None

    @classmethod
    def setUpClass(cls):
        """Run class setup for OVN charm operation tests."""
        super(BaseCharmOperationTest, cls).setUpClass()
        # Subclasses must replace this with the list of services the
        # pause/resume test should operate on.
        cls.services = ['NotImplemented']  # This must be overridden
        cls.current_release = openstack_utils.get_os_release(
            openstack_utils.get_current_os_release_pair(
                cls.release_application or cls.application_name))

    def test_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped, then resume and check
        they are started.
        """
        with self.pause_resume(self.services):
            logging.info('Testing pause resume (services="{}")'
                         .format(self.services))
class CentralCharmOperationTest(BaseCharmOperationTest):
    """OVN Central Charm operation tests."""

    @classmethod
    def setUpClass(cls):
        """Run class setup and declare the services managed by ovn-central."""
        super().setUpClass()
        cls.services = ['ovn-northd', 'ovsdb-server']
class ChassisCharmOperationTest(BaseCharmOperationTest):
    """OVN Chassis Charm operation tests."""

    # The chassis charm alone does not determine the release pair; derive
    # it from ovn-central instead.
    release_application = 'ovn-central'

    @classmethod
    def setUpClass(cls):
        """Run class setup and declare the services managed by the chassis."""
        super().setUpClass()
        cls.services = ['ovn-controller']
@@ -26,5 +26,8 @@ class PacemakerRemoteTest(unittest.TestCase):
def test_check_nodes_online(self):
"""Test that all nodes are online."""
zaza.openstack.configure.hacluster.remove_node(
'api',
'node1')
self.assertTrue(
zaza.openstack.configure.hacluster.check_all_nodes_online('api'))
@@ -430,6 +430,8 @@ class BasePolicydSpecialization(PolicydTest,
# now do the policyd override.
logging.info("Doing policyd override with: {}".format(self._rule))
self._set_policy_with(self._rule)
zaza_model.block_until_wl_status_info_starts_with(
self.application_name, "PO:")
zaza_model.block_until_all_units_idle()
# now make sure the operation fails
@@ -18,6 +18,7 @@ import json
import logging
import time
import uuid
import unittest
import juju
import tenacity
@@ -76,7 +77,9 @@ class RmqTests(test_utils.OpenStackBaseTest):
rmq_utils.configure_ssl_off(units)
# Publish and get amqp messages in all possible unit combinations.
# Qty of checks == (qty of units) ^ 2
# Qty of checks == qty_of_units * (qty_of_units - 1)
assert len(units) >= 2, 'Test is useful only with 2 units or more.'
amqp_msg_counter = 1
host_names = generic_utils.get_unit_hostnames(units)
@@ -87,6 +90,9 @@ class RmqTests(test_utils.OpenStackBaseTest):
for check_unit in units:
check_unit_name = check_unit.entity_id
if dest_unit_name == check_unit_name:
logging.info("Skipping check for this unit to itself.")
continue
check_unit_host = check_unit.public_address
check_unit_host_name = host_names[check_unit_name]
@@ -95,20 +101,20 @@ class RmqTests(test_utils.OpenStackBaseTest):
dest_unit_host,
amqp_msg_stamp)).upper()
# Publish amqp message
logging.debug('Publish message to: {} '
'({} {})'.format(dest_unit_host,
dest_unit_name,
dest_unit_host_name))
logging.info('Publish message to: {} '
'({} {})'.format(dest_unit_host,
dest_unit_name,
dest_unit_host_name))
rmq_utils.publish_amqp_message_by_unit(dest_unit,
amqp_msg, ssl=ssl,
port=port)
# Get amqp message
logging.debug('Get message from: {} '
'({} {})'.format(check_unit_host,
check_unit_name,
check_unit_host_name))
logging.info('Get message from: {} '
'({} {})'.format(check_unit_host,
check_unit_name,
check_unit_host_name))
amqp_msg_rcvd = self._retry_get_amqp_message(check_unit,
ssl=ssl,
@@ -116,8 +122,8 @@ class RmqTests(test_utils.OpenStackBaseTest):
# Validate amqp message content
if amqp_msg == amqp_msg_rcvd:
logging.debug('Message {} received '
'OK.'.format(amqp_msg_counter))
logging.info('Message {} received '
'OK.'.format(amqp_msg_counter))
else:
logging.error('Expected: {}'.format(amqp_msg))
logging.error('Actual: {}'.format(amqp_msg_rcvd))
@@ -131,8 +137,8 @@ class RmqTests(test_utils.OpenStackBaseTest):
def test_400_rmq_cluster_running_nodes(self):
"""Verify cluster status shows every cluster node as running member."""
logging.debug('Checking that all units are in cluster_status '
'running nodes...')
logging.info('Checking that all units are in cluster_status '
'running nodes...')
units = zaza.model.get_units(self.application_name)
@@ -148,8 +154,8 @@ class RmqTests(test_utils.OpenStackBaseTest):
unit for messages. Uses Standard amqp tcp port, no ssl.
"""
logging.debug('Checking amqp message publish/get on all units '
'(ssl off)...')
logging.info('Checking amqp message publish/get on all units '
'(ssl off)...')
units = zaza.model.get_units(self.application_name)
self._test_rmq_amqp_messages_all_units(units, ssl=False)
@@ -170,36 +176,13 @@ class RmqTests(test_utils.OpenStackBaseTest):
logging.info('Skipping SSL tests due to client'
' compatibility issues')
return
logging.debug('Checking amqp message publish/get on all units '
'(ssl on)...')
logging.info('Checking amqp message publish/get on all units '
'(ssl on)...')
self._test_rmq_amqp_messages_all_units(units,
ssl=True, port=5671)
logging.info('OK')
def test_410_rmq_amqp_messages_all_units_ssl_alt_port(self):
"""Send (and check) amqp messages to every rmq unit (alt ssl port).
Send amqp messages with ssl on, to every rmq unit and check
every rmq unit for messages. Custom ssl tcp port.
"""
units = zaza.model.get_units(self.application_name)
# http://pad.lv/1625044
if CompareHostReleases(get_series(units[0])) <= 'trusty':
logging.info('SKIP')
logging.info('Skipping SSL tests due to client'
' compatibility issues')
return
logging.debug('Checking amqp message publish/get on all units '
'(ssl on)...')
units = zaza.model.get_units(self.application_name)
self._test_rmq_amqp_messages_all_units(units,
ssl=True, port=5999)
logging.info('OK')
@tenacity.retry(
retry=tenacity.retry_if_result(lambda ret: ret is not None),
wait=tenacity.wait_fixed(30),
@@ -211,14 +194,14 @@ class RmqTests(test_utils.OpenStackBaseTest):
def test_412_rmq_management_plugin(self):
"""Enable and check management plugin."""
logging.debug('Checking tcp socket connect to management plugin '
'port on all rmq units...')
logging.info('Checking tcp socket connect to management plugin '
'port on all rmq units...')
units = zaza.model.get_units(self.application_name)
mgmt_port = 15672
# Enable management plugin
logging.debug('Enabling management_plugin charm config option...')
logging.info('Enabling management_plugin charm config option...')
config = {'management_plugin': 'True'}
zaza.model.set_application_config('rabbitmq-server', config)
rmq_utils.wait_for_cluster()
@@ -227,10 +210,10 @@ class RmqTests(test_utils.OpenStackBaseTest):
ret = self._retry_port_knock_units(units, mgmt_port)
self.assertIsNone(ret, msg=ret)
logging.debug('Connect to all units (OK)')
logging.info('Connect to all units (OK)')
# Disable management plugin
logging.debug('Disabling management_plugin charm config option...')
logging.info('Disabling management_plugin charm config option...')
config = {'management_plugin': 'False'}
zaza.model.set_application_config('rabbitmq-server', config)
rmq_utils.wait_for_cluster()
@@ -259,21 +242,21 @@ class RmqTests(test_utils.OpenStackBaseTest):
host_names = generic_utils.get_unit_hostnames(units)
# check_rabbitmq monitor
logging.debug('Checking nrpe check_rabbitmq on units...')
logging.info('Checking nrpe check_rabbitmq on units...')
cmds = ['egrep -oh /usr/local.* /etc/nagios/nrpe.d/'
'check_rabbitmq.cfg']
ret = self._retry_check_commands_on_units(cmds, units)
self.assertIsNone(ret, msg=ret)
# check_rabbitmq_queue monitor
logging.debug('Checking nrpe check_rabbitmq_queue on units...')
logging.info('Checking nrpe check_rabbitmq_queue on units...')
cmds = ['egrep -oh /usr/local.* /etc/nagios/nrpe.d/'
'check_rabbitmq_queue.cfg']
ret = self._retry_check_commands_on_units(cmds, units)
self.assertIsNone(ret, msg=ret)
# check dat file existence
logging.debug('Checking nrpe dat file existence on units...')
logging.info('Checking nrpe dat file existence on units...')
for u in units:
unit_host_name = host_names[u.entity_id]
@@ -291,7 +274,7 @@ class RmqTests(test_utils.OpenStackBaseTest):
def test_910_pause_and_resume(self):
"""The services can be paused and resumed."""
logging.debug('Checking pause and resume actions...')
logging.info('Checking pause and resume actions...')
unit = zaza.model.get_units(self.application_name)[0]
assert unit.workload_status == "active"
@@ -307,21 +290,21 @@ class RmqTests(test_utils.OpenStackBaseTest):
assert unit.workload_status == "active"
rmq_utils.wait_for_cluster()
logging.debug('OK')
logging.info('OK')
def test_911_cluster_status(self):
"""Test rabbitmqctl cluster_status action can be returned."""
logging.debug('Checking cluster status action...')
logging.info('Checking cluster status action...')
unit = zaza.model.get_units(self.application_name)[0]
action = zaza.model.run_action(unit.entity_id, "cluster-status")
self.assertIsInstance(action, juju.action.Action)
logging.debug('OK')
logging.info('OK')
def test_912_check_queues(self):
"""Test rabbitmqctl check_queues action can be returned."""
logging.debug('Checking cluster status action...')
logging.info('Checking cluster status action...')
unit = zaza.model.get_units(self.application_name)[0]
action = zaza.model.run_action(unit.entity_id, "check-queues")
@@ -329,10 +312,11 @@ class RmqTests(test_utils.OpenStackBaseTest):
def test_913_list_unconsumed_queues(self):
"""Test rabbitmqctl list-unconsumed-queues action can be returned."""
logging.debug('Checking list-unconsumed-queues action...')
logging.info('Checking list-unconsumed-queues action...')
unit = zaza.model.get_units(self.application_name)[0]
self._test_rmq_amqp_messages_all_units([unit])
units = zaza.model.get_units(self.application_name)
self._test_rmq_amqp_messages_all_units(units)
unit = units[0]
action = zaza.model.run_action(unit.entity_id,
'list-unconsumed-queues')
self.assertIsInstance(action, juju.action.Action)
@@ -352,9 +336,17 @@ class RmqTests(test_utils.OpenStackBaseTest):
# Since we just reused _test_rmq_amqp_messages_all_units, we should
# have created the queue if it didn't already exist, but all messages
# should have already been consumed.
assert queue_data['messages'] == 0, 'Found unexpected message count.'
if queue_data['messages'] != 0:
logging.error(
'{} has {} remaining messages in {} instead of 0.'.format(
unit.entity_id, queue_data['messages'],
queue_data['name']))
if queue_data['messages'] >= 1:
logging.error('One message is: {}'.format(
self._retry_get_amqp_message(unit)))
assert False, 'Found unexpected message count.'
logging.debug('OK')
logging.info('OK')
@tenacity.retry(
retry=tenacity.retry_if_result(lambda errors: bool(errors)),
@@ -363,38 +355,68 @@ class RmqTests(test_utils.OpenStackBaseTest):
def _retry_check_unit_cluster_nodes(self, u, unit_node_names):
return rmq_utils.check_unit_cluster_nodes(u, unit_node_names)
def test_921_remove_unit(self):
@unittest.skip(
"Skipping as a significant rework is required, see"
"https://github.com/openstack-charmers/zaza-openstack-tests/issues/290"
)
def test_921_remove_and_add_unit(self):
"""Test if unit cleans up when removed from Rmq cluster.
Test if a unit correctly cleans up by removing itself from the
RabbitMQ cluster on removal
RabbitMQ cluster on removal.
Add the unit back to the cluster at the end of the test case to
avoid side-effects.
"""
logging.debug('Checking that units correctly clean up after '
'themselves on unit removal...')
logging.info('Checking that units correctly clean up after '
'themselves on unit removal...')
config = {'min-cluster-size': '2'}
zaza.model.set_application_config('rabbitmq-server', config)
rmq_utils.wait_for_cluster()
units = zaza.model.get_units(self.application_name)
removed_unit = units[-1]
left_units = units[:-1]
all_units = zaza.model.get_units(self.application_name)
removed_unit = all_units[-1]
left_units = all_units[:-1]
logging.info('Simulating unit {} removal'.format(removed_unit))
zaza.model.run_on_unit(removed_unit.entity_id, 'hooks/stop')
logging.info('Waiting until unit {} reaches "waiting" state'
''.format(removed_unit))
zaza.model.block_until_unit_wl_status(removed_unit.entity_id,
"waiting")
unit_host_names = generic_utils.get_unit_hostnames(left_units)
unit_node_names = []
for unit in unit_host_names:
unit_node_names.append('rabbit@{}'.format(unit_host_names[unit]))
errors = []
def check_units(units):
unit_host_names = generic_utils.get_unit_hostnames(units)
unit_node_names = []
for unit in unit_host_names:
unit_node_names.append('rabbit@{}'.format(
unit_host_names[unit]))
errors = []
for u in left_units:
e = self._retry_check_unit_cluster_nodes(u,
unit_node_names)
if e:
errors.append(e)
for u in units:
e = self._retry_check_unit_cluster_nodes(u,
unit_node_names)
if e:
errors.append(e)
self.assertFalse(errors, msg=errors)
logging.debug('OK')
self.assertFalse(errors, msg=errors)
logging.info('Checking that all units except for {} are present'
'in the cluster'.format(removed_unit))
check_units(left_units)
logging.info('Re-adding the removed unit {} back to the cluster'
'by simulating the upgrade-charm event'
''.format(removed_unit))
# TODO(dmitriis): Fix the rabbitmq charm to add a proper way to add a
# unit back to the cluster and replace this.
zaza.model.run_on_unit(removed_unit.entity_id, 'hooks/upgrade-charm')
logging.info('Waiting until unit {} reaches "active" state'
''.format(removed_unit))
zaza.model.block_until_unit_wl_status(removed_unit.entity_id,
"active")
logging.info('Checking that all units are present in the cluster')
check_units(all_units)
logging.info('OK')
+34 -7
View File
@@ -178,15 +178,28 @@ class SwiftGlobalReplicationTests(test_utils.OpenStackBaseTest):
logging.info('Deleting container {}'.format(container['name']))
cls.swift_region1.delete_container(container['name'])
def test_two_regions_any_zones_two_replicas(self):
"""Create an object with two replicas across two regions."""
def test_901_two_regions_any_zones_two_replicas(self):
"""Create an object with two replicas across two regions.
We set write affinity to write the first copy in the local
region of the proxy used to perform the write, the other
replica will land in the remote region.
"""
swift_utils.apply_proxy_config(
self.region1_proxy_app,
{
'write-affinity': 'r1, r2',
'write-affinity': 'r1',
'write-affinity-node-count': '1',
'replicas': '2'},
self.region1_model_name)
swift_utils.apply_proxy_config(
self.region2_proxy_app,
{
'write-affinity': 'r2',
'write-affinity-node-count': '1',
'replicas': '2'},
self.region2_model_name)
logging.info('Proxy configs updated in both regions')
container_name, obj_name, obj_replicas = swift_utils.create_object(
self.swift_region1,
self.region1_proxy_app,
@@ -204,15 +217,29 @@ class SwiftGlobalReplicationTests(test_utils.OpenStackBaseTest):
len(obj_replicas.all_zones),
2)
def test_two_regions_any_zones_three_replicas(self):
"""Create an object with three replicas across two regions."""
def test_902_two_regions_any_zones_three_replicas(self):
"""Create an object with three replicas across two regions.
We set write affinity to write the first copy in the local
region of the proxy used to perform the write, at least one
of the other two replicas will end up in the opposite region
based on primary partitions in the ring.
"""
swift_utils.apply_proxy_config(
self.region1_proxy_app,
{
'write-affinity': 'r1, r2',
'write-affinity': 'r1',
'write-affinity-node-count': '1',
'replicas': '3'},
self.region1_model_name)
swift_utils.apply_proxy_config(
self.region2_proxy_app,
{
'write-affinity': 'r2',
'write-affinity-node-count': '1',
'replicas': '3'},
self.region2_model_name)
logging.info('Proxy configs updated in both regions')
container_name, obj_name, obj_replicas = swift_utils.create_object(
self.swift_region1,
self.region1_proxy_app,
@@ -258,7 +285,7 @@ class S3APITest(test_utils.OpenStackBaseTest):
# Create AWS compatible application credentials in Keystone
cls.ec2_creds = ks_client.ec2.create(user_id, project_id)
def test_s3_list_buckets(self):
def test_901_s3_list_buckets(self):
"""Use S3 API to list buckets."""
# We use a mix of the high- and low-level API with common arguments
kwargs = {
@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and running tempest."""
+313
View File
@@ -0,0 +1,313 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for configuring and initializing tempest."""
import jinja2
import urllib.parse
import subprocess
import zaza.utilities.deployment_env as deployment_env
import zaza.openstack.utilities.juju as juju_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.glance.setup as glance_setup
SETUP_ENV_VARS = {
'neutron': ['TEST_GATEWAY', 'TEST_CIDR_EXT', 'TEST_FIP_RANGE',
'TEST_NAMESERVER', 'TEST_CIDR_PRIV'],
'swift': ['TEST_SWIFT_IP'],
}
TEMPEST_FLAVOR_NAME = 'm1.tempest'
TEMPEST_ALT_FLAVOR_NAME = 'm2.tempest'
TEMPEST_SVC_LIST = ['ceilometer', 'cinder', 'glance', 'heat', 'horizon',
'ironic', 'neutron', 'nova', 'sahara', 'swift', 'trove',
'zaqar']
def add_application_ips(ctxt):
    """Add application access IPs to context.

    :param ctxt: Context dictionary
    :type ctxt: dict
    :returns: None
    :rtype: None
    """
    # Map template context keys onto the juju application whose IP they
    # should carry.
    applications = {
        'keystone': 'keystone',
        'dashboard': 'openstack-dashboard',
        'ncc': 'nova-cloud-controller',
    }
    for ctxt_key, app_name in applications.items():
        ctxt[ctxt_key] = juju_utils.get_application_ip(app_name)
def add_nova_config(ctxt, keystone_session):
    """Add nova config to context.

    :param ctxt: Context dictionary
    :type ctxt: dict
    :param keystone_session: keystoneauth1.session.Session object
    :type: keystoneauth1.session.Session
    :returns: None
    :rtype: None
    """
    nova_client = openstack_utils.get_nova_session_client(keystone_session)
    # Map the well-known tempest flavor names onto the context keys the
    # tempest.conf templates expect. Keys are only set if a matching
    # flavor exists.
    flavor_keys = {
        TEMPEST_FLAVOR_NAME: 'flavor_ref',
        TEMPEST_ALT_FLAVOR_NAME: 'flavor_ref_alt',
    }
    for flavor in nova_client.flavors.list():
        ctxt_key = flavor_keys.get(flavor.name)
        if ctxt_key:
            ctxt[ctxt_key] = flavor.id
def add_neutron_config(ctxt, keystone_session):
    """Add neutron config to context.

    :param ctxt: Context dictionary
    :type ctxt: dict
    :param keystone_session: keystoneauth1.session.Session object
    :type: keystoneauth1.session.Session
    :returns: None
    :rtype: None
    """
    neutron_client = openstack_utils.get_neutron_session_client(
        keystone_session)
    external_net = neutron_client.find_resource("network", "ext_net")
    ctxt['ext_net'] = external_net['id']
    provider_router = neutron_client.find_resource(
        "router", "provider-router")
    ctxt['provider_router_id'] = provider_router['id']
    # For focal+ with OVN, we use the same settings as upstream gate.
    # This is because the l3_agent_scheduler extension is only
    # applicable for OVN when conventional layer-3 agent enabled:
    # https://docs.openstack.org/networking-ovn/2.0.1/features.html
    # This enables test_list_show_extensions to run successfully.
    current_release = openstack_utils.get_os_release()
    focal_ussuri = openstack_utils.get_os_release('focal_ussuri')
    if current_release < focal_ussuri:
        ctxt['neutron_api_extensions'] = 'all'
    else:
        ctxt['neutron_api_extensions'] = (
            'address-scope,agent,allowed-address-pairs,'
            'auto-allocated-topology,availability_zone,'
            'binding,default-subnetpools,external-net,'
            'extra_dhcp_opt,multi-provider,net-mtu,'
            'network_availability_zone,network-ip-availability,'
            'port-security,provider,quotas,rbac-address-scope,'
            'rbac-policies,standard-attr-revisions,security-group,'
            'standard-attr-description,subnet_allocation,'
            'standard-attr-tag,standard-attr-timestamp,trunk,'
            'quota_details,router,extraroute,ext-gw-mode,'
            'fip-port-details,pagination,sorting,project-id,'
            'dns-integration,qos')
def add_glance_config(ctxt, keystone_session):
    """Add glance config to context.

    :param ctxt: Context dictionary
    :type ctxt: dict
    :param keystone_session: keystoneauth1.session.Session object
    :type: keystoneauth1.session.Session
    :returns: None
    :rtype: None
    """
    glance_client = openstack_utils.get_glance_session_client(
        keystone_session)
    # Keys are only set when the corresponding image has been uploaded.
    image_keys = (
        (glance_setup.CIRROS_IMAGE_NAME, 'image_id'),
        (glance_setup.CIRROS_ALT_IMAGE_NAME, 'image_alt_id'),
    )
    for image_name, ctxt_key in image_keys:
        matches = openstack_utils.get_images_by_name(
            glance_client, image_name)
        if matches:
            ctxt[ctxt_key] = matches[0].id
def add_cinder_config(ctxt, keystone_session):
    """Add cinder config to context.

    :param ctxt: Context dictionary
    :type ctxt: dict
    :param keystone_session: keystoneauth1.session.Session object
    :type: keystoneauth1.session.Session
    :returns: None
    :rtype: None
    """
    keystone_client = openstack_utils.get_keystone_session_client(
        keystone_session)
    # Prefer the v2 volume service type when registered, fall back to v3.
    # 'catalog_type' is left unset when neither is in the catalog.
    for volume_type in ('volumev2', 'volumev3'):
        if keystone_client.services.list(type=volume_type):
            ctxt['catalog_type'] = volume_type
            break
def add_keystone_config(ctxt, keystone_session):
    """Add keystone config to context.

    :param ctxt: Context dictionary
    :type ctxt: dict
    :param keystone_session: keystoneauth1.session.Session object
    :type: keystoneauth1.session.Session
    :returns: None
    :rtype: None
    """
    keystone_client = openstack_utils.get_keystone_session_client(
        keystone_session)
    admin_domain = keystone_client.domains.find(name="admin_domain")
    ctxt['default_domain_id'] = admin_domain.id
def get_service_list(keystone_session):
    """Retrieve list of services from keystone.

    :param keystone_session: keystoneauth1.session.Session object
    :type: keystoneauth1.session.Session
    :returns: Names of the enabled services registered in the catalog
    :rtype: list
    """
    keystone_client = openstack_utils.get_keystone_session_client(
        keystone_session)
    return [s.name for s in keystone_client.services.list() if s.enabled]
def add_environment_var_config(ctxt, services):
    """Add environment variable config to context.

    :param ctxt: Context dictionary
    :type ctxt: dict
    :param services: List of enabled service names
    :type services: list
    :returns: None
    :rtype: None
    :raises: ValueError if a required environment variable is unset/empty
    """
    deploy_env = deployment_env.get_deployment_context()
    for svc, env_vars in SETUP_ENV_VARS.items():
        if svc not in services:
            continue
        # All variables for an enabled service are mandatory.
        for var in env_vars:
            value = deploy_env.get(var)
            if not value:
                raise ValueError(
                    ("Environment variables {} must all be set to run this"
                     " test").format(', '.join(env_vars)))
            ctxt[var.lower()] = value
def add_auth_config(ctxt):
    """Add authorization config to context.

    :param ctxt: Context dictionary
    :type ctxt: dict
    :returns: None
    :rtype: None
    """
    overcloud_auth = openstack_utils.get_overcloud_auth()
    auth_url = overcloud_auth['OS_AUTH_URL']
    ctxt['proto'] = urllib.parse.urlparse(auth_url).scheme
    ctxt['admin_username'] = overcloud_auth['OS_USERNAME']
    ctxt['admin_password'] = overcloud_auth['OS_PASSWORD']
    # Domain/project scoping only applies to Keystone v3 clouds.
    if overcloud_auth['API_VERSION'] == 3:
        ctxt['admin_project_name'] = overcloud_auth['OS_PROJECT_NAME']
        ctxt['admin_domain_name'] = overcloud_auth['OS_DOMAIN_NAME']
        ctxt['default_credentials_domain_name'] = (
            overcloud_auth['OS_PROJECT_DOMAIN_NAME'])
def get_tempest_context():
    """Generate the tempest config context.

    :returns: Context dictionary
    :rtype: dict
    """
    keystone_session = openstack_utils.get_overcloud_keystone_session()
    ctxt = {}
    enabled = get_service_list(keystone_session)
    ctxt['enabled_services'] = enabled
    ctxt['disabled_services'] = list(set(TEMPEST_SVC_LIST) - set(enabled))
    add_application_ips(ctxt)
    # Per-service context helpers; each one only runs when its service is
    # actually registered in the overcloud catalog.
    svc_ctxt_funcs = {
        'nova': add_nova_config,
        'neutron': add_neutron_config,
        'glance': add_glance_config,
        'cinder': add_cinder_config,
        'keystone': add_keystone_config,
    }
    for svc_name, ctxt_func in svc_ctxt_funcs.items():
        if svc_name in enabled:
            ctxt_func(ctxt, keystone_session)
    add_environment_var_config(ctxt, enabled)
    add_auth_config(ctxt)
    return ctxt
def render_tempest_config(target_file, ctxt, template_name):
    """Render tempest config for specified config file and template.

    :param target_file: Name of file to render config to
    :type target_file: str
    :param ctxt: Context dictionary
    :type ctxt: dict
    :param template_name: Name of template file
    :type template_name: str
    :returns: None
    :rtype: None
    """
    # Templates ship inside the zaza.openstack package (see MANIFEST.in).
    loader = jinja2.PackageLoader(
        'zaza.openstack', 'charm_tests/tempest/templates')
    template = jinja2.Environment(loader=loader).get_template(template_name)
    with open(target_file, 'w') as target:
        target.write(template.render(ctxt))
def setup_tempest(tempest_template, accounts_template):
    """Initialize tempest and render tempest config.

    :param tempest_template: Filename of the tempest.conf template
    :type tempest_template: str
    :param accounts_template: Filename of the accounts.yaml template
    :type accounts_template: str
    :returns: None
    :rtype: None
    """
    # Best-effort reset of the workspace: 'remove' fails when the workspace
    # does not exist yet, and 'init' fails when it already does, so both
    # failures are deliberately ignored.
    try:
        subprocess.check_call(['tempest', 'workspace', 'remove', '--rmdir',
                               '--name', 'tempest-workspace'])
    except subprocess.CalledProcessError:
        pass
    try:
        subprocess.check_call(['tempest', 'init', 'tempest-workspace'])
    except subprocess.CalledProcessError:
        pass
    # Building the context queries the overcloud (keystone, nova, neutron,
    # ...); do it once and reuse the result for both renders instead of
    # rebuilding it per file.
    ctxt = get_tempest_context()
    render_tempest_config(
        'tempest-workspace/etc/tempest.conf', ctxt, tempest_template)
    render_tempest_config(
        'tempest-workspace/etc/accounts.yaml', ctxt, accounts_template)
def render_tempest_config_keystone_v2():
    """Render tempest config for Keystone V2 API.

    Initializes the tempest-workspace and renders tempest.conf from
    tempest_v2.j2 and accounts.yaml from accounts.j2.

    :returns: None
    :rtype: None
    """
    setup_tempest('tempest_v2.j2', 'accounts.j2')
def render_tempest_config_keystone_v3():
    """Render tempest config for Keystone V3 API.

    Initializes the tempest-workspace and renders tempest.conf from
    tempest_v3.j2 and accounts.yaml from accounts.j2.

    :returns: None
    :rtype: None
    """
    setup_tempest('tempest_v3.j2', 'accounts.j2')
@@ -0,0 +1,6 @@
- username: 'demo'
tenant_name: 'demo'
password: 'pass'
- username: 'alt_demo'
tenant_name: 'alt_demo'
password: 'secret'
@@ -0,0 +1,100 @@
[DEFAULT]
debug = false
use_stderr = false
log_file = tempest.log
[auth]
test_accounts_file = accounts.yaml
default_credentials_domain_name = Default
admin_username = {{ admin_username }}
admin_project_name = admin
admin_password = {{ admin_password }}
admin_domain_name = Default
{% if 'nova' in enabled_services %}
[compute]
image_ref = {{ image_id }}
image_ref_alt = {{ image_alt_id }}
flavor_ref = {{ flavor_ref }}
flavor_ref_alt = {{ flavor_ref_alt }}
region = RegionOne
min_compute_nodes = 3
# TODO: review this as its release specific
# min_microversion = 2.2
# max_microversion = latest
[compute-feature-enabled]
console_output = true
resize = true
live_migration = true
block_migration_for_live_migration = true
attach_encrypted_volume = false
{% endif %}
{% if 'keystone' in enabled_services %}
[identity]
uri = {{ proto }}://{{ keystone }}:5000/v2.0
auth_version = v2
admin_role = Admin
region = RegionOne
disable_ssl_certificate_validation = true
[identity-feature-enabled]
api_v2 = true
api_v3 = false
{% endif %}
{% if 'glance' in enabled_services %}
[image]
http_image = http://{{ test_swift_ip }}:80/swift/v1/images/cirros-0.3.4-x86_64-uec.tar.gz
{% endif %}
{% if 'neutron' in enabled_services %}
[network]
project_network_cidr = {{ test_cidr_priv }}
public_network_id = {{ ext_net }}
dns_servers = {{ test_nameserver }}
project_networks_reachable = false
[network-feature-enabled]
ipv6 = false
{% endif %}
{% if 'heat' in enabled_services %}
[orchestration]
stack_owner_role = Admin
instance_type = m1.small
keypair_name = testkey
{% endif %}
[oslo_concurrency]
lock_path = /tmp
[scenario]
img_dir = /home/ubuntu/images
img_file = cirros-0.3.4-x86_64-disk.img
img_container_format = bare
img_disk_format = qcow2
[validation]
run_validation = true
image_ssh_user = cirros
[service_available]
{% for svc in enabled_services -%}
{{ svc }} = true
{% endfor -%}
{% for svc in disabled_services -%}
{{ svc }} = false
{% endfor %}
{% if 'cinder' in enabled_services %}
[volume]
backend_names = cinder-ceph
storage_protocol = ceph
catalog_type = {{ catalog_type }}
[volume-feature-enabled]
backup = false
{% endif %}
@@ -0,0 +1,104 @@
[DEFAULT]
debug = false
use_stderr = false
log_file = tempest.log
[auth]
test_accounts_file = accounts.yaml
default_credentials_domain_name = {{ default_credentials_domain_name }}
admin_username = {{ admin_username }}
admin_project_name = {{ admin_project_name }}
admin_password = {{ admin_password }}
admin_domain_name = {{ admin_domain_name }}
{% if 'nova' in enabled_services %}
[compute]
image_ref = {{ image_id }}
image_ref_alt = {{ image_alt_id }}
flavor_ref = {{ flavor_ref }}
flavor_ref_alt = {{ flavor_ref_alt }}
min_compute_nodes = 3
# TODO: review this as its release specific
# min_microversion = 2.2
# max_microversion = latest
[compute-feature-enabled]
console_output = true
resize = true
live_migration = true
block_migration_for_live_migration = true
attach_encrypted_volume = false
{% endif %}
{% if 'keystone' in enabled_services %}
[identity]
uri = {{ proto }}://{{ keystone }}:5000/v2.0
uri_v3 = {{ proto }}://{{ keystone }}:5000/v3
auth_version = v3
admin_role = Admin
region = RegionOne
default_domain_id = {{ default_domain_id }}
admin_domain_scope = true
disable_ssl_certificate_validation = true
[identity-feature-enabled]
api_v2 = false
api_v3 = true
{% endif %}
{% if 'glance' in enabled_services %}
[image]
http_image = http://{{ test_swift_ip }}:80/swift/v1/images/cirros-0.3.4-x86_64-uec.tar.gz
{% endif %}
{% if 'neutron' in enabled_services %}
[network]
project_network_cidr = {{ test_cidr_priv }}
public_network_id = {{ ext_net }}
dns_servers = {{ test_nameserver }}
project_networks_reachable = false
floating_network_name = {{ ext_net }}
[network-feature-enabled]
ipv6 = false
api_extensions = {{ neutron_api_extensions }}
{% endif %}
{% if 'heat' in enabled_services %}
[orchestration]
stack_owner_role = Admin
instance_type = m1.small
keypair_name = testkey
{% endif %}
[oslo_concurrency]
lock_path = /tmp
[scenario]
img_dir = /home/ubuntu/images
img_file = cirros-0.3.4-x86_64-disk.img
img_container_format = bare
img_disk_format = qcow2
[validation]
run_validation = true
image_ssh_user = cirros
[service_available]
{% for svc in enabled_services -%}
{{ svc }} = true
{% endfor -%}
{% for svc in disabled_services -%}
{{ svc }} = false
{% endfor %}
{% if 'cinder' in enabled_services %}
[volume]
backend_names = cinder-ceph
storage_protocol = ceph
catalog_type = {{ catalog_type }}
[volume-feature-enabled]
backup = false
{% endif %}
@@ -0,0 +1,78 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for running tempest tests."""
import os
import subprocess
import zaza
import zaza.charm_lifecycle.utils
import zaza.charm_lifecycle.test
import tempfile
class TempestTest():
    """Tempest test class."""

    # Run directly rather than through the unittest-based runner.
    test_runner = zaza.charm_lifecycle.test.DIRECT

    def run(self):
        """Run tempest tests as specified in tests/tests.yaml.

        Test keys are parsed from ['tests_options']['tempest']['model'],
        where valid test keys are: smoke (bool), whitelist (list of tests),
        blacklist (list of tests), regex (list of regex's) and black-regex
        (list of regex's).

        :returns: Status of tempest run
        :rtype: bool
        """
        charm_config = zaza.charm_lifecycle.utils.get_charm_config()
        base_options = ['tempest', 'run', '--workspace',
                        'tempest-workspace', '--config',
                        'tempest-workspace/etc/tempest.conf']
        for model_alias in zaza.model.get_juju_model_aliases().keys():
            tempest_test_key = model_alias
            if model_alias == zaza.charm_lifecycle.utils.DEFAULT_MODEL_ALIAS:
                tempest_test_key = 'default'
            config = charm_config['tests_options']['tempest'][
                tempest_test_key]
            # Build the option list afresh for each alias. Previously the
            # list accumulated across iterations, so a later alias inherited
            # the earlier alias's options, including --whitelist-file /
            # --blacklist-file paths pointing into an already-deleted
            # temporary directory.
            tempest_options = list(base_options)
            if config.get('smoke'):
                tempest_options.append('--smoke')
            if config.get('regex'):
                tempest_options.extend(
                    ['--regex', ' '.join(config.get('regex'))])
            if config.get('black-regex'):
                tempest_options.extend(
                    ['--black-regex', ' '.join(config.get('black-regex'))])
            with tempfile.TemporaryDirectory() as tmpdirname:
                # The list files must outlive the subprocess call, so the
                # run happens inside this context manager.
                if config.get('whitelist'):
                    white_file = os.path.join(tmpdirname, 'white.cfg')
                    with open(white_file, 'w') as f:
                        f.write('\n'.join(config.get('whitelist')))
                        f.write('\n')
                    tempest_options.extend(['--whitelist-file', white_file])
                if config.get('blacklist'):
                    black_file = os.path.join(tmpdirname, 'black.cfg')
                    with open(black_file, 'w') as f:
                        f.write('\n'.join(config.get('blacklist')))
                        f.write('\n')
                    tempest_options.extend(['--blacklist-file', black_file])
                print(tempest_options)
                try:
                    subprocess.check_call(tempest_options)
                except subprocess.CalledProcessError:
                    return False
        return True
+113 -1
View File
@@ -14,13 +14,19 @@
"""Module containing base class for implementing charm tests."""
import contextlib
import logging
import ipaddress
import subprocess
import tenacity
import unittest
import novaclient
import zaza.model as model
import zaza.charm_lifecycle.utils as lifecycle_utils
import zaza.openstack.configure.guest as configure_guest
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.utilities.generic as generic_utils
import zaza.openstack.charm_tests.glance.setup as glance_setup
def skipIfNotHA(service_name):
@@ -427,7 +433,113 @@ class OpenStackBaseTest(BaseCharmTest):
@classmethod
def setUpClass(cls, application_name=None, model_alias=None):
    """Run setup for test class to create common resources.

    :param application_name: Name of application to run tests against,
                             passed through to the parent class setup
    :type application_name: Optional[str]
    :param model_alias: Model alias to point the tests at, passed through
                        to the parent class setup
    :type model_alias: Optional[str]
    """
    super(OpenStackBaseTest, cls).setUpClass(application_name, model_alias)
    # Session against the overcloud Keystone; used to build the service
    # clients below and available to subclasses.
    cls.keystone_session = openstack_utils.get_overcloud_keystone_session(
        model_name=cls.model_name)
    cls.cacert = openstack_utils.get_cacert()
    cls.nova_client = (
        openstack_utils.get_nova_session_client(cls.keystone_session))
def launch_guest(self, guest_name, userdata=None):
    """Launch a guest to use in tests.

    Note that it is up to the caller to have set the RESOURCE_PREFIX class
    variable prior to calling this method.

    Also note that this method will remove any already existing instance
    with same name as what is requested.

    :param guest_name: Name of instance (prefixed with RESOURCE_PREFIX)
    :type guest_name: str
    :param userdata: Userdata to attach to instance
    :type userdata: Optional[str]
    :returns: Nova instance object
    :rtype: Server
    """
    instance_name = '{}-{}'.format(self.RESOURCE_PREFIX, guest_name)

    # Remove any leftover instance from a previous (possibly failed) run
    # so the launch below starts from a clean slate.
    instance = self.retrieve_guest(instance_name)
    if instance:
        logging.info('Removing already existing instance ({}) with '
                     'requested name ({})'
                     .format(instance.id, instance_name))
        openstack_utils.delete_resource(
            self.nova_client.servers,
            instance.id,
            msg="server")
    return configure_guest.launch_instance(
        glance_setup.LTS_IMAGE_NAME,
        vm_name=instance_name,
        userdata=userdata)
def launch_guests(self, userdata=None):
    """Launch two guests to use in tests.

    Note that it is up to the caller to have set the RESOURCE_PREFIX class
    variable prior to calling this method.

    :param userdata: Userdata to attach to instance
    :type userdata: Optional[str]
    :returns: List of launched Nova instance objects
    :rtype: List[Server]
    """
    launched_instances = []
    # Launch guests 'ins-1' and 'ins-2'.  Each launch is retried up to
    # three times with exponential backoff; launch_guest removes any
    # half-created instance before retrying.
    for guest_number in range(1, 2+1):
        for attempt in tenacity.Retrying(
                stop=tenacity.stop_after_attempt(3),
                wait=tenacity.wait_exponential(
                    multiplier=1, min=2, max=10)):
            with attempt:
                launched_instances.append(
                    self.launch_guest(
                        guest_name='ins-{}'.format(guest_number),
                        userdata=userdata))
    return launched_instances
def retrieve_guest(self, guest_name):
    """Return guest matching name.

    :param guest_name: Name of the instance to look up
    :type guest_name: str
    :returns: the matching guest, or None when no such guest exists
    :rtype: Union[novaclient.Server, None]
    """
    # EAFP: let nova raise NotFound rather than listing and filtering.
    try:
        return self.nova_client.servers.find(name=guest_name)
    except novaclient.exceptions.NotFound:
        return None
def retrieve_guests(self):
    """Return the two test guests created by ``launch_guests``.

    Note that it is up to the caller to have set the RESOURCE_PREFIX class
    variable prior to calling this method.

    :returns: the matching guests, None in place of any that do not exist
    :rtype: Tuple[Union[novaclient.Server, None],
                  Union[novaclient.Server, None]]
    """
    # Bug fix: the second lookup previously used the '-ins-1' suffix as
    # well, so the first guest was returned twice and the second never.
    instance_1 = self.retrieve_guest(
        '{}-ins-1'.format(self.RESOURCE_PREFIX))
    instance_2 = self.retrieve_guest(
        '{}-ins-2'.format(self.RESOURCE_PREFIX))
    return instance_1, instance_2
def format_addr(addr):
    """Validate and format IP address.

    IPv6 addresses are wrapped in square brackets so they can be used
    directly in URLs; IPv4 addresses are returned unchanged.

    :param addr: IPv6 or IPv4 address
    :type addr: str
    :returns: Address string, optionally encapsulated in brackets([])
    :rtype: str
    :raises: ValueError
    """
    parsed = ipaddress.ip_address(addr)
    template = '[{}]' if parsed.version == 6 else '{}'
    return template.format(parsed)
@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing TrilioVault."""
@@ -0,0 +1,83 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for configuring Trilio."""
import logging
import os
import zaza.model as zaza_model
import zaza.openstack.utilities.juju as juju_utils
import zaza.openstack.utilities.generic as generic_utils
def basic_setup():
    """Run setup for testing Trilio.

    Setup for testing Trilio is currently part of functional
    tests.
    """
    logging.info("Configuring NFS Server")
    nfs_server_ip = zaza_model.get_app_ips("nfs-server-test-fixture")[0]
    trilio_wlm_unit = zaza_model.get_first_unit_name("trilio-wlm")

    nfs_shares_conf = {"nfs-shares": "{}:/srv/testing".format(nfs_server_ip)}
    _trilio_services = ["trilio-wlm", "trilio-data-mover"]

    # Point both Trilio charms at the test NFS share; only wait for the
    # model to settle when a value actually changed.
    # NOTE(review): get_application_config entries elsewhere in this repo
    # are dicts of the form {'value': ...}; if that holds here this
    # comparison is always True and config is re-set on every run — confirm.
    conf_changed = False
    for juju_service in _trilio_services:
        app_config = zaza_model.get_application_config(juju_service)
        if app_config["nfs-shares"] != nfs_shares_conf["nfs-shares"]:
            zaza_model.set_application_config(juju_service, nfs_shares_conf)
            conf_changed = True

    if conf_changed:
        zaza_model.wait_for_agent_status()

    # NOTE(jamespage): wlm-api service must be running in order
    # to execute the setup actions
    zaza_model.block_until_service_status(
        unit_name=trilio_wlm_unit,
        services=["wlm-api"],
        target_status="active",
    )

    logging.info("Executing create-cloud-admin-trust")
    password = juju_utils.leader_get("keystone", "admin_passwd")
    generic_utils.assertActionRanOK(
        zaza_model.run_action_on_leader(
            "trilio-wlm",
            "create-cloud-admin-trust",
            raise_on_failure=True,
            action_params={"password": password},
        )
    )

    logging.info("Executing create-license")
    # License file path is supplied out-of-band via the environment.
    test_license = os.environ.get("TEST_TRILIO_LICENSE")
    if test_license and os.path.exists(test_license):
        zaza_model.attach_resource("trilio-wlm",
                                   resource_name='license',
                                   resource_path=test_license)
        generic_utils.assertActionRanOK(
            zaza_model.run_action_on_leader(
                "trilio-wlm", "create-license",
                raise_on_failure=True
            )
        )
    else:
        logging.error("Unable to find Trilio License file")
+419
View File
@@ -0,0 +1,419 @@
#!/usr/bin/env python3
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of tests for vault."""
import logging
import tenacity
import zaza.model as zaza_model
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.juju as juju_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.glance.setup as glance_setup
import zaza.openstack.configure.guest as guest_utils
def _resource_reaches_status(
    unit, auth_args, command, resource_id, target_status
):
    """Wait for a workload resource to reach a status.

    Runs the given status command remotely and raises unless the reported
    status equals ``target_status``; intended to be driven by a tenacity
    retryer.

    :param unit: unit to run cli commands on
    :type unit: zaza_model.Unit
    :param auth_args: authentication arguments for command
    :type auth_args: str
    :param command: command to execute
    :type command: str
    :param resource_id: resource ID to monitor
    :type resource_id: str
    :param target_status: status to monitor for
    :type target_status: str
    :raises: Exception when the resource is not in the target status
    """
    raw_output = juju_utils.remote_run(
        unit,
        remote_cmd=command.format(
            auth_args=auth_args, resource_id=resource_id
        ),
        timeout=180,
        fatal=True,
    )
    # The status value is the last line of the stripped command output.
    current_status = raw_output.strip().split("\n")[-1]
    logging.info(
        "Checking resource ({}) status: {}".format(
            resource_id, current_status
        )
    )
    if current_status != target_status:
        raise Exception("Resource not ready: {}".format(current_status))
class WorkloadmgrCLIHelper(object):
    """Helper for working with workloadmgrcli.

    Wraps the ``openstack workload ...`` / ``openstack workloadmgr ...``
    CLI, executed remotely on the first trilio-wlm unit via juju.
    """

    # CLI command templates.  {auth_args} is filled with the credentials
    # built by _auth_arguments; the remaining placeholders take the
    # relevant resource IDs.  '-f value -c <col>' makes the output a bare
    # value suitable for parsing.
    WORKLOAD_CREATE_CMD = (
        "openstack {auth_args} workload create "
        "--instance instance-id={instance_id} "
        "-f value -c ID"
    )
    WORKLOAD_STATUS_CMD = (
        "openstack {auth_args} workload show "
        "-f value -c status "
        " {resource_id} "
    )
    SNAPSHOT_CMD = (
        "openstack {auth_args} workload snapshot --full {workload_id}"
    )
    SNAPSHOT_ID_CMD = (
        "openstack {auth_args} workload snapshot list "
        "--workload_id {workload_id} "
        "-f value -c ID"
    )
    SNAPSHOT_STATUS_CMD = (
        "openstack {auth_args} workload snapshot show "
        "-f value -c status "
        "{resource_id} "
    )
    ONECLICK_RESTORE_CMD = (
        "openstack {auth_args} workload snapshot oneclick-restore "
        "{snapshot_id} "
    )
    RESTORE_LIST_CMD = (
        "openstack {auth_args} workloadmgr restore list "
        "--snapshot_id {snapshot_id} "
        "-f value -c ID"
    )
    RESTORE_STATUS_CMD = (
        "openstack {auth_args} workloadmgr restore show "
        "-f value -c status {resource_id}"
    )

    def __init__(self, keystone_client):
        """Initialise helper.

        :param keystone_client: keystone client
        :type keystone_client: keystoneclient.v3
        """
        # All CLI commands are executed on this unit.
        self.trilio_wlm_unit = zaza_model.get_first_unit_name(
            "trilio-wlm"
        )
        self.auth_args = self._auth_arguments(keystone_client)

    @classmethod
    def _auth_arguments(cls, keystone_client):
        """Generate workloadmgrcli arguments for cloud authentication.

        :param keystone_client: keystone client used to resolve the
                                admin domain and project IDs
        :type keystone_client: keystoneclient.v3
        :returns: string of required cli arguments for authentication
        :rtype: str
        """
        overcloud_auth = openstack_utils.get_overcloud_auth()
        overcloud_auth.update(
            {
                "OS_DOMAIN_ID": openstack_utils.get_domain_id(
                    keystone_client, domain_name="admin_domain"
                ),
                "OS_TENANT_ID": openstack_utils.get_project_id(
                    keystone_client,
                    project_name="admin",
                    domain_name="admin_domain",
                ),
                "OS_TENANT_NAME": "admin",
            }
        )
        _required_keys = [
            "OS_AUTH_URL",
            "OS_USERNAME",
            "OS_PASSWORD",
            "OS_REGION_NAME",
            "OS_DOMAIN_ID",
            "OS_TENANT_ID",
            "OS_TENANT_NAME",
        ]
        # Turn e.g. OS_AUTH_URL=x into '--os-auth-url=x'.
        params = []
        for os_key in _required_keys:
            params.append(
                "--{}={}".format(
                    os_key.lower().replace("_", "-"),
                    overcloud_auth[os_key],
                )
            )
        return " ".join(params)

    def create_workload(self, instance_id):
        """Create a new workload.

        Blocks (up to ~180s with exponential backoff) until the workload
        reaches the 'available' status.

        :param instance_id: instance ID to create workload from
        :type instance_id: str
        :returns: workload ID
        :rtype: str
        """
        workload_id = juju_utils.remote_run(
            self.trilio_wlm_unit,
            remote_cmd=self.WORKLOAD_CREATE_CMD.format(
                auth_args=self.auth_args, instance_id=instance_id
            ),
            timeout=180,
            fatal=True,
        ).strip()
        retryer = tenacity.Retrying(
            wait=tenacity.wait_exponential(multiplier=1, max=30),
            stop=tenacity.stop_after_delay(180),
            reraise=True,
        )
        retryer(
            _resource_reaches_status,
            self.trilio_wlm_unit,
            self.auth_args,
            self.WORKLOAD_STATUS_CMD,
            workload_id,
            "available",
        )
        return workload_id

    def create_snapshot(self, workload_id):
        """Create a new snapshot.

        The snapshot command does not emit the new ID, so it is looked up
        with a separate list command afterwards.  Blocks (up to ~720s)
        until the snapshot reaches the 'available' status.

        :param workload_id: workload ID to create snapshot from
        :type workload_id: str
        :returns: snapshot ID
        :rtype: str
        """
        juju_utils.remote_run(
            self.trilio_wlm_unit,
            remote_cmd=self.SNAPSHOT_CMD.format(
                auth_args=self.auth_args, workload_id=workload_id
            ),
            timeout=180,
            fatal=True,
        )
        snapshot_id = juju_utils.remote_run(
            self.trilio_wlm_unit,
            remote_cmd=self.SNAPSHOT_ID_CMD.format(
                auth_args=self.auth_args, workload_id=workload_id
            ),
            timeout=180,
            fatal=True,
        ).strip()
        retryer = tenacity.Retrying(
            wait=tenacity.wait_exponential(multiplier=1, max=30),
            stop=tenacity.stop_after_delay(720),
            reraise=True,
        )
        retryer(
            _resource_reaches_status,
            self.trilio_wlm_unit,
            self.auth_args,
            self.SNAPSHOT_STATUS_CMD,
            snapshot_id,
            "available",
        )
        return snapshot_id

    def oneclick_restore(self, snapshot_id):
        """Restore a workload from a snapshot.

        Blocks (up to ~720s) until the restore reaches the 'available'
        status.

        :param snapshot_id: snapshot ID to restore
        :type snapshot_id: str
        :returns: restore ID
        :rtype: str
        """
        juju_utils.remote_run(
            self.trilio_wlm_unit,
            remote_cmd=self.ONECLICK_RESTORE_CMD.format(
                auth_args=self.auth_args, snapshot_id=snapshot_id
            ),
            timeout=180,
            fatal=True,
        )
        # Restore ID is not emitted by the restore command; list it.
        restore_id = juju_utils.remote_run(
            self.trilio_wlm_unit,
            remote_cmd=self.RESTORE_LIST_CMD.format(
                auth_args=self.auth_args, snapshot_id=snapshot_id
            ),
            timeout=180,
            fatal=True,
        ).strip()
        retryer = tenacity.Retrying(
            wait=tenacity.wait_exponential(multiplier=1, max=30),
            stop=tenacity.stop_after_delay(720),
            reraise=True,
        )
        retryer(
            _resource_reaches_status,
            self.trilio_wlm_unit,
            self.auth_args,
            self.RESTORE_STATUS_CMD,
            restore_id,
            "available",
        )
        return restore_id
class TrilioBaseTest(test_utils.OpenStackBaseTest):
    """Base test class for charms."""

    # Prefix for all OpenStack resources created by these tests.
    RESOURCE_PREFIX = "zaza-triliovault-tests"

    # Path to the service configuration file; set by concrete subclasses
    # (which also set application_name and services).
    conf_file = None

    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super().setUpClass()
        # Service clients built on the keystone session created by the
        # parent class.
        cls.cinder_client = openstack_utils.get_cinder_session_client(
            cls.keystone_session
        )
        cls.nova_client = openstack_utils.get_nova_session_client(
            cls.keystone_session
        )
        cls.keystone_client = openstack_utils.get_keystone_session_client(
            cls.keystone_session
        )

    def test_restart_on_config_change(self):
        """Check restart happens on config change.

        Change debug mode and assert that change propagates to the correct
        file and that services are restarted as a result
        """
        # Expected default and alternate values
        set_default = {"debug": False}
        set_alternate = {"debug": True}

        # Make config change, check for service restarts
        self.restart_on_changed(
            self.conf_file,
            set_default,
            set_alternate,
            {"DEFAULT": {"debug": ["False"]}},
            {"DEFAULT": {"debug": ["True"]}},
            self.services,
        )

    def test_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped then resume and check
        they are started
        """
        with self.pause_resume(self.services, pgrep_full=False):
            logging.info("Testing pause resume")

    def test_snapshot_workload(self):
        """Ensure that a workload can be created and snapshot'ed."""
        # Setup volume and instance and attach one to the other
        volume = openstack_utils.create_volume(
            self.cinder_client,
            size="1",
            name="{}-100-vol".format(self.RESOURCE_PREFIX),
        )
        instance = guest_utils.launch_instance(
            glance_setup.CIRROS_IMAGE_NAME,
            vm_name="{}-server".format(self.RESOURCE_PREFIX),
        )

        # Trilio need direct access to ceph - OMG
        openstack_utils.attach_volume(
            self.nova_client, volume.id, instance.id
        )

        workloadmgrcli = WorkloadmgrCLIHelper(self.keystone_client)

        # Create workload using instance
        logging.info("Creating workload configuration")
        workload_id = workloadmgrcli.create_workload(instance.id)
        logging.info("Created workload: {}".format(workload_id))

        logging.info("Initiating snapshot")
        snapshot_id = workloadmgrcli.create_snapshot(workload_id)
        logging.info(
            "Snapshot of workload {} created: {}".format(
                workload_id, snapshot_id
            )
        )

        # Remove the original resources so the restore below recreates
        # them from the snapshot.
        logging.info("Deleting server and volume ready for restore")
        openstack_utils.delete_resource(
            self.nova_client.servers, instance.id, "deleting instance"
        )
        # NOTE: Trilio leaves a snapshot in place -
        #       drop before volume deletion.
        for (
            volume_snapshot
        ) in self.cinder_client.volume_snapshots.list():
            openstack_utils.delete_resource(
                self.cinder_client.volume_snapshots,
                volume_snapshot.id,
                "deleting snapshot",
            )
        openstack_utils.delete_resource(
            self.cinder_client.volumes, volume.id, "deleting volume"
        )

        logging.info("Initiating restore")
        workloadmgrcli.oneclick_restore(snapshot_id)
class TrilioWLMTest(TrilioBaseTest):
    """Tests for Trilio Workload Manager charm."""

    # Config file, juju application and systemd services exercised by the
    # restart-on-config-change and pause/resume tests in the base class.
    conf_file = "/etc/workloadmgr/workloadmgr.conf"
    application_name = "trilio-wlm"
    services = [
        "workloadmgr-api",
        "workloadmgr-scheduler",
        "workloadmgr-workloads",
        "workloadmgr-cron",
    ]
class TrilioDMAPITest(TrilioBaseTest):
    """Tests for Trilio Data Mover API charm."""

    # Config file, juju application and service exercised by the base
    # class tests.
    conf_file = "/etc/dmapi/dmapi.conf"
    application_name = "trilio-dm-api"
    services = ["dmapi-api"]
class TrilioDataMoverTest(TrilioBaseTest):
    """Tests for Trilio Data Mover charm."""

    # Config file, juju application and service exercised by the base
    # class tests.
    conf_file = "/etc/tvault-contego/tvault-contego.conf"
    application_name = "trilio-data-mover"
    services = ["tvault-contego"]
+19
View File
@@ -14,6 +14,7 @@
"""Run configuration phase."""
import base64
import functools
import requests
import tempfile
@@ -27,6 +28,22 @@ import zaza.openstack.utilities.generic
import zaza.utilities.juju as juju_utils
def get_cacert_file():
    """Retrieve CA cert used for vault endpoints and write to file.

    :returns: Path to file with CA cert, or None when the vault
              application has no 'ssl-ca' configured.
    :rtype: Optional[str]
    """
    vault_config = zaza.model.get_application_config('vault')
    encoded_cert = vault_config['ssl-ca']['value']
    if not encoded_cert:
        return None
    # delete=False so the file outlives this function; callers use the
    # returned path.
    with tempfile.NamedTemporaryFile(mode='wb', delete=False) as fp:
        fp.write(base64.b64decode(encoded_cert))
        return fp.name
def basic_setup(cacert=None, unseal_and_authorize=False):
"""Run basic setup for vault tests.
@@ -35,6 +52,7 @@ def basic_setup(cacert=None, unseal_and_authorize=False):
:param unseal_and_authorize: Whether to unseal and authorize vault.
:type unseal_and_authorize: bool
"""
cacert = cacert or get_cacert_file()
vault_svc = vault_utils.VaultFacade(cacert=cacert)
if unseal_and_authorize:
vault_svc.unseal()
@@ -47,6 +65,7 @@ def basic_setup_and_unseal(cacert=None):
:param cacert: Path to CA cert used for vaults api cert.
:type cacert: str
"""
cacert = cacert or get_cacert_file()
vault_svc = vault_utils.VaultFacade(cacert=cacert)
vault_svc.unseal()
for unit in zaza.model.get_units('vault'):
+2 -1
View File
@@ -27,6 +27,7 @@ import yaml
import collections
import zaza.model
import zaza.openstack.charm_tests.test_utils as test_utils
AUTH_FILE = "vault_tests.yaml"
CharmVaultClient = collections.namedtuple(
@@ -101,7 +102,7 @@ def get_unit_api_url(ip):
transport = 'http'
if vault_config['ssl-cert']['value']:
transport = 'https'
return '{}://{}:8200'.format(transport, ip)
return '{}://{}:8200'.format(transport, test_utils.format_addr(ip))
def get_hvac_client(vault_url, cacert=None):
+22 -6
View File
@@ -22,6 +22,14 @@ import time
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.nova.utils as nova_utils
import zaza.openstack.utilities.exceptions as openstack_exceptions
from tenacity import (
RetryError,
Retrying,
stop_after_attempt,
wait_exponential,
)
boot_tests = {
'cirros': {
@@ -134,12 +142,20 @@ def launch_instance(instance_key, use_boot_volume=False, vm_name=None,
port=port)['floating_ip_address']
logging.info('Assigned floating IP {} to {}'.format(ip, vm_name))
try:
openstack_utils.ping_response(ip)
except subprocess.CalledProcessError as e:
logging.error('Pinging {} failed with {}'.format(ip, e.returncode))
logging.error('stdout: {}'.format(e.stdout))
logging.error('stderr: {}'.format(e.stderr))
raise
for attempt in Retrying(
stop=stop_after_attempt(8),
wait=wait_exponential(multiplier=1, min=2, max=60)):
with attempt:
try:
openstack_utils.ping_response(ip)
except subprocess.CalledProcessError as e:
logging.error('Pinging {} failed with {}'
.format(ip, e.returncode))
logging.error('stdout: {}'.format(e.stdout))
logging.error('stderr: {}'.format(e.stderr))
raise
except RetryError:
raise openstack_exceptions.NovaGuestNoPingResponse()
# Check ssh'ing to instance.
logging.info('Testing ssh access.')
+1 -1
View File
@@ -85,7 +85,7 @@ def create_segments(segment_number=1, host_assignment_method=None):
@tenacity.retry(
wait=tenacity.wait_exponential(multiplier=2, max=60),
reraise=True, stop=tenacity.stop_after_attempt(5),
reraise=True, stop=tenacity.stop_after_attempt(10),
retry=tenacity.retry_if_exception_type(ostack_except.ConflictException))
def enable_host(masakari_client, host, segment):
"""Enable hypervisor within masakari.
@@ -0,0 +1,76 @@
"""Module to setup pre-deploy TLS certs."""
import ipaddress
import itertools
import base64
import os
import zaza.openstack.utilities.cert
ISSUER_NAME = 'OSCI'
def set_cidr_certs():
    """Create certs and keys for deploy using IP SANS from CIDR.

    Create a certificate authority certificate and key. The CA cert and key
    are then base 64 encoded and assigned to the TEST_CAKEY and
    TEST_CACERT environment variables.

    Using the CA key a second certificate and key are generated. The new
    certificate has a SAN entry for the first 2^11 IPs in the CIDR.
    The cert and key are then base 64 encoded and assigned to the TEST_KEY
    and TEST_CERT environment variables.
    """
    cakey, cacert = zaza.openstack.utilities.cert.generate_cert(
        ISSUER_NAME,
        generate_ca=True)
    os.environ['TEST_CAKEY'] = base64.b64encode(cakey).decode()
    os.environ['TEST_CACERT'] = base64.b64encode(cacert).decode()
    # There is a hard limit on the total length of extensions in a
    # certificate, so cap the number of SubjectAlternativeNames we put in.
    #
    # - 2^11 ought to be enough for anybody
    subject_alt_names = [
        str(host) for host in itertools.islice(
            ipaddress.IPv4Network(os.environ.get('TEST_CIDR_EXT')), 2 ** 11)]
    key, cert = zaza.openstack.utilities.cert.generate_cert(
        '*.serverstack',
        alternative_names=subject_alt_names,
        issuer_name=ISSUER_NAME,
        signing_key=cakey)
    os.environ['TEST_KEY'] = base64.b64encode(key).decode()
    os.environ['TEST_CERT'] = base64.b64encode(cert).decode()
def set_certs_per_vips():
    """Create certs and keys for deploy using VIPS.

    Create a certificate authority certificate and key. The CA cert and key
    are then base 64 encoded and assigned to the TEST_CAKEY and
    TEST_CACERT environment variables.

    Using the CA key a certificate and key is generated for each VIP
    specified via environment variables. eg if TEST_VIP06=172.20.0.107 is
    set in the environment then a cert with a SAN entry for 172.20.0.107 is
    generated. The cert and key are then base 64 encoded and assigned to
    the TEST_VIP06_KEY and TEST_VIP06_CERT environment variables.
    """
    cakey, cacert = zaza.openstack.utilities.cert.generate_cert(
        ISSUER_NAME,
        generate_ca=True)
    os.environ['TEST_CAKEY'] = base64.b64encode(cakey).decode()
    os.environ['TEST_CACERT'] = base64.b64encode(cacert).decode()
    # Snapshot the VIP entries first: the loop below adds new environment
    # variables and must not observe them.
    vips = [(name, ip) for name, ip in os.environ.items()
            if name.startswith('TEST_VIP')]
    for vip_name, vip_ip in vips:
        key, cert = zaza.openstack.utilities.cert.generate_cert(
            '*.serverstack',
            alternative_names=[vip_ip],
            issuer_name=ISSUER_NAME,
            signing_key=cakey)
        os.environ[
            '{}_KEY'.format(vip_name)] = base64.b64encode(key).decode()
        os.environ[
            '{}_CERT'.format(vip_name)] = base64.b64encode(cert).decode()
+24
View File
@@ -18,6 +18,8 @@ Functions for managing masakari resources and simulating compute node loss
and recovery.
"""
import logging
import tenacity
import time
import zaza.model
@@ -119,3 +121,25 @@ def create_server_power_off_alarm(aodh_client, alarm_name, server_uuid):
'type': 'string',
'value': server_uuid}]}}
return aodh_client.alarm.create(alarm_def)
def block_until_alarm_state(aodh_client, alarm_id, target_state='alarm'):
    """Block until alarm has reached target state.

    Polls the alarm up to three times with exponential backoff and raises
    when it has not reached ``target_state`` by the final attempt.

    :param aodh_client: Authenticated aodh v2 client
    :type aodh_client: aodhclient.v2.client.Client
    :param alarm_id: ID of provided alarm
    :type alarm_id: str
    :param target_state: State to wait for the alarm to reach
    :type target_state: str
    :raises: tenacity retry failure (wrapping the AssertionError) when the
             alarm never reaches target_state
    """
    for attempt in tenacity.Retrying(
            stop=tenacity.stop_after_attempt(3),
            wait=tenacity.wait_exponential(multiplier=1, min=2, max=10)):
        with attempt:
            alarm_state = get_alarm_state(
                aodh_client,
                alarm_id)
            logging.info('Alarm in state {}'.format(alarm_state))
            # Failing assertion triggers the next tenacity attempt.
            assert alarm_state == target_state
+12
View File
@@ -168,6 +168,12 @@ class CephPoolNotConfigured(Exception):
pass
class CephGenericError(Exception):
"""A generic/other Ceph error occurred."""
pass
class NovaGuestMigrationFailed(Exception):
"""Nova guest migration failed."""
@@ -180,6 +186,12 @@ class NovaGuestRestartFailed(Exception):
pass
class NovaGuestNoPingResponse(Exception):
    """Nova guest failed to respond to pings."""
class PolicydError(Exception):
"""Policyd override failed."""
+91 -143
View File
@@ -13,18 +13,31 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for interacting with juju."""
import os
from pathlib import Path
import yaml
"""Deprecated, please use zaza.utilities.juju."""
from zaza import (
model,
controller,
)
from zaza.openstack.utilities import generic as generic_utils
import logging
import functools
import zaza.utilities.juju
def deprecate():
    """Return a decorator that adds a deprecation warning to a function.

    The wrapped function behaves exactly like the original; a warning is
    logged on every call pointing at the zaza.utilities.juju equivalent.
    """
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            logging.warning(
                "{} from zaza.openstack.utilities.juju is deprecated. "
                "Please use the equivalent from zaza.utilities.juju".format(
                    func.__name__))
            return func(*args, **kwargs)
        return inner
    return decorator
@deprecate()
def get_application_status(application=None, unit=None, model_name=None):
"""Return the juju status for an application.
@@ -37,18 +50,29 @@ def get_application_status(application=None, unit=None, model_name=None):
:returns: Juju status output for an application
:rtype: dict
"""
status = get_full_juju_status()
if unit and not application:
application = unit.split("/")[0]
if application:
status = status.applications.get(application)
if unit:
status = status.get("units").get(unit)
return status
return zaza.utilities.juju.get_application_status(
application=application,
unit=unit,
model_name=model_name)
@deprecate()
def get_application_ip(application, model_name=None):
"""Get the application's IP address.
:param application: Application name
:type application: str
:param model_name: Name of model to query.
:type model_name: str
:returns: Application's IP address
:rtype: str
"""
return zaza.utilities.juju.get_application_ip(
application,
model_name=model_name)
@deprecate()
def get_cloud_configs(cloud=None):
"""Get cloud configuration from local clouds.yaml.
@@ -60,14 +84,11 @@ def get_cloud_configs(cloud=None):
:returns: Dictionary of cloud configuration
:rtype: dict
"""
home = str(Path.home())
cloud_config = os.path.join(home, ".local", "share", "juju", "clouds.yaml")
if cloud:
return generic_utils.get_yaml_config(cloud_config)["clouds"].get(cloud)
else:
return generic_utils.get_yaml_config(cloud_config)
return zaza.utilities.juju.get_cloud_configs(
cloud=cloud)
@deprecate()
def get_full_juju_status(model_name=None):
"""Return the full juju status output.
@@ -76,10 +97,11 @@ def get_full_juju_status(model_name=None):
:returns: Full juju status output
:rtype: dict
"""
status = model.get_status(model_name=model_name)
return status
return zaza.utilities.juju.get_full_juju_status(
model_name=model_name)
@deprecate()
def get_machines_for_application(application, model_name=None):
"""Return machines for a given application.
@@ -90,20 +112,12 @@ def get_machines_for_application(application, model_name=None):
:returns: machines for an application
:rtype: Iterator[str]
"""
status = get_application_status(application, model_name=model_name)
if not status:
return
# libjuju juju status no longer has units for subordinate charms
# Use the application it is subordinate-to to find machines
if status.get("units") is None and status.get("subordinate-to"):
status = get_application_status(status.get("subordinate-to")[0],
model_name=model_name)
for unit in status.get("units").keys():
yield status.get("units").get(unit).get("machine")
return zaza.utilities.juju.get_machines_for_application(
application,
model_name=model_name)
@deprecate()
def get_unit_name_from_host_name(host_name, application, model_name=None):
"""Return the juju unit name corresponding to a hostname.
@@ -114,17 +128,13 @@ def get_unit_name_from_host_name(host_name, application, model_name=None):
:param model_name: Name of model to query.
:type model_name: str
"""
# Assume that a juju managed hostname always ends in the machine number and
# remove the domain name if it present.
machine_number = host_name.split('-')[-1].split('.')[0]
unit_names = [
u.entity_id
for u in model.get_units(application_name=application,
model_name=model_name)
if int(u.data['machine-id']) == int(machine_number)]
return unit_names[0]
return zaza.utilities.juju.get_unit_name_from_host_name(
host_name,
application,
model_name=model_name)
@deprecate()
def get_machine_status(machine, key=None, model_name=None):
"""Return the juju status for a machine.
@@ -137,17 +147,13 @@ def get_machine_status(machine, key=None, model_name=None):
:returns: Juju status output for a machine
:rtype: dict
"""
status = get_full_juju_status(model_name=model_name)
if "lxd" in machine:
host = machine.split('/')[0]
status = status.machines.get(host)['containers'][machine]
else:
status = status.machines.get(machine)
if key:
status = status.get(key)
return status
return zaza.utilities.juju.get_machine_status(
machine,
key=key,
model_name=model_name)
@deprecate()
def get_machine_series(machine, model_name=None):
"""Return the juju series for a machine.
@@ -158,13 +164,12 @@ def get_machine_series(machine, model_name=None):
:returns: Juju series
:rtype: string
"""
return get_machine_status(
machine=machine,
key='series',
model_name=model_name
)
return zaza.utilities.juju.get_machine_series(
machine,
model_name=model_name)
@deprecate()
def get_machine_uuids_for_application(application, model_name=None):
"""Return machine uuids for a given application.
@@ -175,30 +180,22 @@ def get_machine_uuids_for_application(application, model_name=None):
:returns: machine uuuids for an application
:rtype: Iterator[str]
"""
for machine in get_machines_for_application(application,
model_name=model_name):
yield get_machine_status(machine, key="instance-id",
model_name=model_name)
return zaza.utilities.juju.get_machine_uuids_for_application(
application,
model_name=model_name)
@deprecate()
def get_provider_type():
"""Get the type of the undercloud.
:returns: Name of the undercloud type
:rtype: string
"""
cloud = controller.get_cloud()
if cloud:
# If the controller was deployed from this system with
# the cloud configured in ~/.local/share/juju/clouds.yaml
# Determine the cloud type directly
return get_cloud_configs(cloud)["type"]
else:
# If the controller was deployed elsewhere
# For now assume openstack
return "openstack"
return zaza.utilities.juju.get_provider_type()
@deprecate()
def remote_run(unit, remote_cmd, timeout=None, fatal=None, model_name=None):
"""Run command on unit and return the output.
@@ -217,46 +214,15 @@ def remote_run(unit, remote_cmd, timeout=None, fatal=None, model_name=None):
:rtype: string
:raises: model.CommandRunFailed
"""
if fatal is None:
fatal = True
result = model.run_on_unit(
return zaza.utilities.juju.remote_run(
unit,
remote_cmd,
timeout=timeout,
fatal=fatal,
model_name=model_name)
if result:
if int(result.get("Code")) == 0:
return result.get("Stdout")
else:
if fatal:
raise model.CommandRunFailed(remote_cmd, result)
return result.get("Stderr")
def _get_unit_names(names, model_name=None):
"""Resolve given application names to first unit name of said application.
Helper function that resolves application names to first unit name of
said application. Any already resolved unit names are returned as-is.
:param names: List of units/applications to translate
:type names: list(str)
:param model_name: Name of model to query.
:type model_name: str
:returns: List of units
:rtype: list(str)
"""
result = []
for name in names:
if '/' in name:
result.append(name)
else:
result.append(model.get_first_unit_name(
name,
model_name=model_name))
return result
@deprecate()
def get_relation_from_unit(entity, remote_entity, remote_interface_name,
model_name=None):
"""Get relation data passed between two units.
@@ -281,22 +247,14 @@ def get_relation_from_unit(entity, remote_entity, remote_interface_name,
:rtype: dict
:raises: model.CommandRunFailed
"""
application = entity.split('/')[0]
remote_application = remote_entity.split('/')[0]
rid = model.get_relation_id(application, remote_application,
remote_interface_name=remote_interface_name,
model_name=model_name)
(unit, remote_unit) = _get_unit_names(
[entity, remote_entity],
return zaza.utilities.juju.get_relation_from_unit(
entity,
remote_entity,
remote_interface_name,
model_name=model_name)
cmd = 'relation-get --format=yaml -r "{}" - "{}"' .format(rid, remote_unit)
result = model.run_on_unit(unit, cmd, model_name=model_name)
if result and int(result.get('Code')) == 0:
return yaml.safe_load(result.get('Stdout'))
else:
raise model.CommandRunFailed(cmd, result)
@deprecate()
def leader_get(application, key='', model_name=None):
"""Get leader settings from leader unit of named application.
@@ -310,14 +268,13 @@ def leader_get(application, key='', model_name=None):
:rtype: dict
:raises: model.CommandRunFailed
"""
cmd = 'leader-get --format=yaml {}'.format(key)
result = model.run_on_leader(application, cmd, model_name=model_name)
if result and int(result.get('Code')) == 0:
return yaml.safe_load(result.get('Stdout'))
else:
raise model.CommandRunFailed(cmd, result)
return zaza.utilities.juju.leader_get(
application,
key=key,
model_name=model_name)
@deprecate()
def get_subordinate_units(unit_list, charm_name=None, status=None,
model_name=None):
"""Get a list of all subordinate units associated with units in unit_list.
@@ -348,17 +305,8 @@ def get_subordinate_units(unit_list, charm_name=None, status=None,
:returns: List of matching unit names.
:rtype: []
"""
if not status:
status = model.get_status(model_name=model_name)
sub_units = []
for unit_name in unit_list:
app_name = unit_name.split('/')[0]
subs = status.applications[app_name]['units'][unit_name].get(
'subordinates') or {}
if charm_name:
for unit_name, unit_data in subs.items():
if charm_name in unit_data['charm']:
sub_units.append(unit_name)
else:
sub_units.extend([n for n in subs.keys()])
return sub_units
return zaza.utilities.juju.get_subordinate_units(
unit_list,
charm_name=charm_name,
status=status,
model_name=model_name)
+82 -10
View File
@@ -39,6 +39,7 @@ from keystoneauth1.identity import (
v2,
)
import zaza.openstack.utilities.cert as cert
import zaza.utilities.deployment_env as deployment_env
from novaclient import client as novaclient_client
from neutronclient.v2_0 import client as neutronclient
from neutronclient.common import exceptions as neutronexceptions
@@ -72,7 +73,9 @@ CIRROS_RELEASE_URL = 'http://download.cirros-cloud.net/version/released'
CIRROS_IMAGE_URL = 'http://download.cirros-cloud.net'
UBUNTU_IMAGE_URLS = {
'bionic': ('http://cloud-images.ubuntu.com/{release}/current/'
'{release}-server-cloudimg-{arch}.img')
'{release}-server-cloudimg-{arch}.img'),
'focal': ('http://cloud-images.ubuntu.com/{release}/current/'
'{release}-server-cloudimg-{arch}.img'),
}
CHARM_TYPES = {
@@ -108,6 +111,10 @@ CHARM_TYPES = {
'pkg': 'designate-common',
'origin_setting': 'openstack-origin'
},
'ovn-central': {
'pkg': 'ovn-common',
'origin_setting': 'source'
},
}
# Older tests use the order the services appear in the list to imply
@@ -126,6 +133,7 @@ UPGRADE_SERVICES = [
{'name': 'nova-compute', 'type': CHARM_TYPES['nova']},
{'name': 'openstack-dashboard',
'type': CHARM_TYPES['openstack-dashboard']},
{'name': 'ovn-central', 'type': CHARM_TYPES['ovn-central']},
]
@@ -155,7 +163,7 @@ WORKLOAD_STATUS_EXCEPTIONS = {
KEYSTONE_CACERT = "keystone_juju_ca_cert.crt"
KEYSTONE_REMOTE_CACERT = (
"/usr/local/share/ca-certificates/{}".format(KEYSTONE_CACERT))
KEYSTONE_LOCAL_CACERT = ("/tmp/{}".format(KEYSTONE_CACERT))
KEYSTONE_LOCAL_CACERT = ("tests/{}".format(KEYSTONE_CACERT))
def get_cacert():
@@ -487,6 +495,22 @@ def get_project_id(ks_client, project_name, api_version=2, domain_name=None):
return None
def get_domain_id(ks_client, domain_name):
"""Return domain ID.
:param ks_client: Authenticated keystoneclient
:type ks_client: keystoneclient.v3.Client object
:param domain_name: Name of the domain
:type domain_name: string
:returns: Domain ID
:rtype: string or None
"""
all_domains = ks_client.domains.list(name=domain_name)
if all_domains:
return all_domains[0].id
return None
# Neutron Helpers
def get_gateway_uuids():
"""Return machine uuids for neutron-gateway(s).
@@ -709,6 +733,7 @@ def configure_gateway_ext_port(novaclient, neutronclient, net_id=None,
if not net_id:
net_id = get_admin_net(neutronclient)['id']
ports_created = 0
for uuid in uuids:
server = novaclient.servers.get(uuid)
ext_port_name = "{}_ext-port".format(server.name)
@@ -729,12 +754,19 @@ def configure_gateway_ext_port(novaclient, neutronclient, net_id=None,
}
}
port = neutronclient.create_port(body=body_value)
ports_created += 1
server.interface_attach(port_id=port['port']['id'],
net_id=None, fixed_ip=None)
if add_dataport_to_netplan:
mac_address = get_mac_from_port(port, neutronclient)
add_interface_to_netplan(server.name,
mac_address=mac_address)
if not ports_created:
# NOTE: uuids is an iterator so testing it for contents or length prior
# to iterating over it is futile.
raise RuntimeError('Unable to determine UUIDs for machines to attach '
'external networking to.')
ext_br_macs = []
for port in neutronclient.list_ports(network_id=net_id)['ports']:
if 'ext-port' in port['name']:
@@ -1577,7 +1609,7 @@ def get_undercloud_auth():
'API_VERSION': 3,
}
if domain:
auth_settings['OS_DOMAIN_NAME': 'admin_domain'] = domain
auth_settings['OS_DOMAIN_NAME'] = domain
else:
auth_settings['OS_USER_DOMAIN_NAME'] = (
os.environ.get('OS_USER_DOMAIN_NAME'))
@@ -1589,6 +1621,10 @@ def get_undercloud_auth():
if os_project_id is not None:
auth_settings['OS_PROJECT_ID'] = os_project_id
_os_cacert = os.environ.get('OS_CACERT')
if _os_cacert:
auth_settings.update({'OS_CACERT': _os_cacert})
# Validate settings
for key, settings in list(auth_settings.items()):
if settings is None:
@@ -1718,14 +1754,15 @@ def get_urllib_opener():
Using urllib.request.urlopen will automatically handle proxies so none
of this function is needed except we are currently specifying proxies
via OS_TEST_HTTP_PROXY rather than http_proxy so a ProxyHandler is needed
via TEST_HTTP_PROXY rather than http_proxy so a ProxyHandler is needed
explicitly stating the proxies.
:returns: An opener which opens URLs via BaseHandlers chained together
:rtype: urllib.request.OpenerDirector
"""
http_proxy = os.getenv('OS_TEST_HTTP_PROXY')
logging.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy))
deploy_env = deployment_env.get_deployment_context()
http_proxy = deploy_env.get('TEST_HTTP_PROXY')
logging.debug('TEST_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
handler = urllib.request.ProxyHandler({'http': http_proxy})
@@ -2069,6 +2106,28 @@ def create_volume(cinder, size, name=None, image=None):
return volume
def attach_volume(nova, volume_id, instance_id):
"""Attach a cinder volume to a nova instance.
:param nova: Authenticated nova client
:type nova: novaclient.v2.client.Client
:param volume_id: the id of the volume to attach
:type volume_id: str
:param instance_id: the id of the instance to attach the volume to
:type instance_id: str
:returns: nova volume pointer
:rtype: novaclient.v2.volumes.Volume
"""
logging.info(
'Attaching volume {} to instance {}'.format(
volume_id, instance_id
)
)
return nova.volumes.create_server_volume(server_id=instance_id,
volume_id=volume_id,
device='/dev/vdx')
def create_volume_backup(cinder, volume_id, name=None):
"""Create cinder volume backup.
@@ -2224,7 +2283,7 @@ def get_ports_from_device_id(neutron_client, device_id):
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=120),
reraise=True, stop=tenacity.stop_after_attempt(12))
reraise=True, stop=tenacity.stop_after_delay(1800))
def cloud_init_complete(nova_client, vm_id, bootstring):
"""Wait for cloud init to complete on the given vm.
@@ -2262,7 +2321,7 @@ def ping_response(ip):
check=True)
def ssh_test(username, ip, vm_name, password=None, privkey=None):
def ssh_test(username, ip, vm_name, password=None, privkey=None, retry=True):
"""SSH to given ip using supplied credentials.
:param username: Username to connect with
@@ -2277,6 +2336,9 @@ def ssh_test(username, ip, vm_name, password=None, privkey=None):
:param privkey: Private key to authenticate with. If a password is
supplied it is used rather than the private key.
:type privkey: str
:param retry: If True, retry a few times if an exception is raised in the
process, e.g. on connection failure.
:type retry: boolean
:raises: exceptions.SSHFailed
"""
def verify(stdin, stdout, stderr):
@@ -2290,8 +2352,18 @@ def ssh_test(username, ip, vm_name, password=None, privkey=None):
vm_name))
raise exceptions.SSHFailed()
ssh_command(username, ip, vm_name, 'uname -n',
password=password, privkey=privkey, verify=verify)
# NOTE(lourot): paramiko.SSHClient().connect() calls read_all() which can
# raise an EOFError, see
# * https://docs.paramiko.org/en/stable/api/packet.html
# * https://github.com/paramiko/paramiko/issues/925
# So retrying a few times makes sense.
for attempt in tenacity.Retrying(
stop=tenacity.stop_after_attempt(3 if retry else 1),
wait=tenacity.wait_exponential(multiplier=1, min=2, max=10),
reraise=True):
with attempt:
ssh_command(username, ip, vm_name, 'uname -n',
password=password, privkey=privkey, verify=verify)
def ssh_command(username,
+4
View File
@@ -230,4 +230,8 @@ PACKAGE_CODENAMES = {
('9', 'train'),
('10', 'ussuri'),
]),
'ovn-common': OrderedDict([
('2', 'train'),
('20', 'ussuri'),
]),
}
@@ -212,11 +212,11 @@ async def parallel_series_upgrade(
for unit in status["units"]
]
await asyncio.gather(*app_idle)
await prepare_series_upgrade(leader_machine, to_series=to_series)
prepare_group = [
prepare_series_upgrade(machine, to_series=to_series)
for machine in machines]
await asyncio.gather(*prepare_group)
await prepare_series_upgrade(leader_machine, to_series=to_series)
if leader_machine not in completed_machines:
machines.append(leader_machine)
upgrade_group = [
+89 -15
View File
@@ -107,11 +107,17 @@ def run_post_upgrade_functions(post_upgrade_functions):
cl_utils.get_class(func)()
def series_upgrade_non_leaders_first(application, from_series="trusty",
to_series="xenial",
origin='openstack-origin',
completed_machines=[],
post_upgrade_functions=None):
def series_upgrade_non_leaders_first(
application, from_series="trusty",
to_series="xenial",
origin='openstack-origin',
completed_machines=[],
pause_non_leader_primary=False,
pause_non_leader_subordinate=False,
files=None,
workaround_script=None,
post_upgrade_functions=None
):
"""Series upgrade non leaders first.
Wrap all the functionality to handle series upgrade for charms
@@ -129,6 +135,18 @@ def series_upgrade_non_leaders_first(application, from_series="trusty",
:param completed_machines: List of completed machines which do no longer
require series upgrade.
:type completed_machines: list
:param pause_non_leader_primary: Whether the non-leader applications should
be paused
:type pause_non_leader_primary: bool
:param pause_non_leader_subordinate: Whether the non-leader subordinate
hacluster applications should be
paused
:type pause_non_leader_subordinate: bool
:param from_series: The series from which to upgrade
:param files: Workaround files to scp to unit under upgrade
:type files: list
:param workaround_script: Workaround script to run during series upgrade
:type workaround_script: str
:returns: None
:rtype: None
"""
@@ -141,6 +159,23 @@ def series_upgrade_non_leaders_first(application, from_series="trusty",
else:
non_leaders.append(unit)
# Pause the non-leaders
for unit in non_leaders:
if pause_non_leader_subordinate:
if status["units"][unit].get("subordinates"):
for subordinate in status["units"][unit]["subordinates"]:
_app = subordinate.split('/')[0]
if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST:
logging.info("Skipping pausing {} - blacklisted"
.format(subordinate))
else:
logging.info("Pausing {}".format(subordinate))
model.run_action(
subordinate, "pause", action_params={})
if pause_non_leader_primary:
logging.info("Pausing {}".format(unit))
model.run_action(unit, "pause", action_params={})
# Series upgrade the non-leaders first
for unit in non_leaders:
machine = status["units"][unit]["machine"]
@@ -155,7 +190,7 @@ def series_upgrade_non_leaders_first(application, from_series="trusty",
completed_machines.append(machine)
else:
logging.info("Skipping unit: {}. Machine: {} already upgraded. "
.format(unit, machine, application))
.format(unit, machine))
model.block_until_all_units_idle()
# Series upgrade the leader
@@ -165,20 +200,28 @@ def series_upgrade_non_leaders_first(application, from_series="trusty",
series_upgrade(leader, machine,
from_series=from_series, to_series=to_series,
origin=origin,
workaround_script=workaround_script,
files=files,
post_upgrade_functions=post_upgrade_functions)
completed_machines.append(machine)
else:
logging.info("Skipping unit: {}. Machine: {} already upgraded."
.format(unit, machine, application))
.format(unit, machine))
model.block_until_all_units_idle()
async def async_series_upgrade_non_leaders_first(application,
from_series="trusty",
to_series="xenial",
origin='openstack-origin',
completed_machines=[],
post_upgrade_functions=None):
async def async_series_upgrade_non_leaders_first(
application,
from_series="trusty",
to_series="xenial",
origin='openstack-origin',
completed_machines=[],
pause_non_leader_primary=False,
pause_non_leader_subordinate=False,
files=None,
workaround_script=None,
post_upgrade_functions=None
):
"""Series upgrade non leaders first.
Wrap all the functionality to handle series upgrade for charms
@@ -196,6 +239,18 @@ async def async_series_upgrade_non_leaders_first(application,
:param completed_machines: List of completed machines which do no longer
require series upgrade.
:type completed_machines: list
:param pause_non_leader_primary: Whether the non-leader applications should
be paused
:type pause_non_leader_primary: bool
:param pause_non_leader_subordinate: Whether the non-leader subordinate
hacluster applications should be
paused
:type pause_non_leader_subordinate: bool
:param from_series: The series from which to upgrade
:param files: Workaround files to scp to unit under upgrade
:type files: list
:param workaround_script: Workaround script to run during series upgrade
:type workaround_script: str
:returns: None
:rtype: None
"""
@@ -208,6 +263,23 @@ async def async_series_upgrade_non_leaders_first(application,
else:
non_leaders.append(unit)
# Pause the non-leaders
for unit in non_leaders:
if pause_non_leader_subordinate:
if status["units"][unit].get("subordinates"):
for subordinate in status["units"][unit]["subordinates"]:
_app = subordinate.split('/')[0]
if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST:
logging.info("Skipping pausing {} - blacklisted"
.format(subordinate))
else:
logging.info("Pausing {}".format(subordinate))
await model.async_run_action(
subordinate, "pause", action_params={})
if pause_non_leader_primary:
logging.info("Pausing {}".format(unit))
await model.async_run_action(unit, "pause", action_params={})
# Series upgrade the non-leaders first
for unit in non_leaders:
machine = status["units"][unit]["machine"]
@@ -223,7 +295,7 @@ async def async_series_upgrade_non_leaders_first(application,
completed_machines.append(machine)
else:
logging.info("Skipping unit: {}. Machine: {} already upgraded. "
.format(unit, machine, application))
.format(unit, machine))
await model.async_block_until_all_units_idle()
# Series upgrade the leader
@@ -234,11 +306,13 @@ async def async_series_upgrade_non_leaders_first(application,
leader, machine,
from_series=from_series, to_series=to_series,
origin=origin,
workaround_script=workaround_script,
files=files,
post_upgrade_functions=post_upgrade_functions)
completed_machines.append(machine)
else:
logging.info("Skipping unit: {}. Machine: {} already upgraded."
.format(unit, machine, application))
.format(unit, machine))
await model.async_block_until_all_units_idle()
+37 -20
View File
@@ -20,7 +20,8 @@ import zaza.model
SERVICE_GROUPS = collections.OrderedDict([
('Stateful Services', ['percona-cluster', 'rabbitmq-server', 'ceph-mon']),
('Stateful Services', ['percona-cluster', 'rabbitmq-server', 'ceph-mon',
'mysql-innodb-cluster']),
('Core Identity', ['keystone']),
('Control Plane', [
'aodh', 'barbican', 'ceilometer', 'ceph-fs',
@@ -92,6 +93,19 @@ def _filter_non_openstack_services(app, app_config, model_name=None):
return False
def _apply_extra_filters(filters, extra_filters):
if extra_filters:
if isinstance(extra_filters, list):
filters.extend(extra_filters)
elif callable(extra_filters):
filters.append(extra_filters)
else:
raise RuntimeError(
"extra_filters should be a list of "
"callables")
return filters
def get_upgrade_groups(model_name=None, extra_filters=None):
"""Place apps in the model into their upgrade groups.
@@ -108,18 +122,10 @@ def get_upgrade_groups(model_name=None, extra_filters=None):
_filter_openstack_upgrade_list,
_filter_non_openstack_services,
]
if extra_filters:
if isinstance(extra_filters, list):
filters.extend(extra_filters)
elif callable(extra_filters):
filters.append(extra_filters)
else:
raise RuntimeError(
"extra_filters should be a list of "
"callables")
filters = _apply_extra_filters(filters, extra_filters)
apps_in_model = get_upgrade_candidates(
model_name=model_name,
filters=filters,)
filters=filters)
return _build_service_groups(apps_in_model)
@@ -136,15 +142,26 @@ def get_series_upgrade_groups(model_name=None, extra_filters=None):
:rtype: collections.OrderedDict
"""
filters = [_filter_subordinates]
if extra_filters:
if isinstance(extra_filters, list):
filters.extend(extra_filters)
elif callable(extra_filters):
filters.append(extra_filters)
else:
raise RuntimeError(
"extra_filters should be a list of "
"callables")
filters = _apply_extra_filters(filters, extra_filters)
apps_in_model = get_upgrade_candidates(
model_name=model_name,
filters=filters)
return _build_service_groups(apps_in_model)
def get_charm_upgrade_groups(model_name=None, extra_filters=None):
"""Place apps in the model into their upgrade groups for a charm upgrade.
Place apps in the model into their upgrade groups. If an app is deployed
but is not in SERVICE_GROUPS then it is placed in a sweep_up group.
:param model_name: Name of model to query.
:type model_name: str
:returns: Dict of group lists keyed on group name.
:rtype: collections.OrderedDict
"""
filters = _apply_extra_filters([], extra_filters)
apps_in_model = get_upgrade_candidates(
model_name=model_name,
filters=filters)