Merge branch 'master' into ovs-mark-managed-ports

This commit is contained in:
Aurelien Lourot
2020-08-26 10:24:12 +02:00
34 changed files with 739 additions and 287 deletions

View File

@@ -581,21 +581,27 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
nova_mock.keypairs.create.assert_called_once_with(name='mykeys')
def test_get_private_key_file(self):
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir',
return_value='/tmp/zaza-model1')
self.assertEqual(
openstack_utils.get_private_key_file('mykeys'),
'tests/id_rsa_mykeys')
'/tmp/zaza-model1/id_rsa_mykeys')
def test_write_private_key(self):
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir',
return_value='/tmp/zaza-model1')
m = mock.mock_open()
with mock.patch(
'zaza.openstack.utilities.openstack.open', m, create=False
):
openstack_utils.write_private_key('mykeys', 'keycontents')
m.assert_called_once_with('tests/id_rsa_mykeys', 'w')
m.assert_called_once_with('/tmp/zaza-model1/id_rsa_mykeys', 'w')
handle = m()
handle.write.assert_called_once_with('keycontents')
def test_get_private_key(self):
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir',
return_value='/tmp/zaza-model1')
self.patch_object(openstack_utils.os.path, "isfile",
return_value=True)
m = mock.mock_open(read_data='myprivkey')
@@ -607,6 +613,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
'myprivkey')
def test_get_private_key_file_missing(self):
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir',
return_value='/tmp/zaza-model1')
self.patch_object(openstack_utils.os.path, "isfile",
return_value=False)
self.assertIsNone(openstack_utils.get_private_key('mykeys'))
@@ -765,7 +773,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
privkey='myprivkey')
paramiko_mock.connect.assert_called_once_with(
'10.0.0.10',
password='',
password=None,
pkey='akey',
username='bob')

View File

@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import mock
import pprint
@@ -89,12 +88,13 @@ class TestUpgradeUtils(ut_utils.BaseTestCase):
expected)
def test_get_upgrade_groups(self):
expected = collections.OrderedDict([
expected = [
('Database Services', []),
('Stateful Services', []),
('Core Identity', []),
('Control Plane', ['cinder']),
('Data Plane', ['nova-compute']),
('sweep_up', [])])
('sweep_up', [])]
actual = openstack_upgrade.get_upgrade_groups()
pprint.pprint(expected)
pprint.pprint(actual)
@@ -103,12 +103,13 @@ class TestUpgradeUtils(ut_utils.BaseTestCase):
expected)
def test_get_series_upgrade_groups(self):
expected = collections.OrderedDict([
('Stateful Services', ['mydb']),
expected = [
('Database Services', ['mydb']),
('Stateful Services', []),
('Core Identity', []),
('Control Plane', ['cinder']),
('Data Plane', ['nova-compute']),
('sweep_up', ['ntp'])])
('sweep_up', ['ntp'])]
actual = openstack_upgrade.get_series_upgrade_groups()
pprint.pprint(expected)
pprint.pprint(actual)

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for benchmarking ceph."""

View File

@@ -0,0 +1,124 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ceph Benchmark Tests."""
import logging
import re
import unittest
import zaza.model
class BenchmarkTests(unittest.TestCase):
"""Ceph Bencharmk Tests."""
@classmethod
def setUpClass(cls):
"""Run class setup for running ceph benchmark tests."""
super().setUpClass()
cls.results_match = "^[A-Z].*"
cls.pool = "zaza_benchmarks"
cls.test_results = {}
cls.time_in_secs = 30
def parse_bench_results(self, results_string):
"""Parse bench results from string.
:param results string: Output from rados bench command.
With newlines due to juju run's output.
:type results_string: string
:returns: Dictionary of results summary
:rtype: dict
"""
_results = {}
_lines = results_string.split("\n")
for _line in _lines:
_line = _line.strip()
if re.match(self.results_match, _line):
_keyvalues = _line.split(":")
try:
_results[_keyvalues[0].strip()] = _keyvalues[1].strip()
except IndexError:
# Skipping detailed output for summary details
pass
return _results
def run_rados_bench(self, action, params=None):
"""Run rados bench.
:param action: String rados bench command i.e. write, rand, seq
:type action: string
:param params: List of string extra parameters to rados bench command
:type params: List[strings]
:returns: Unit run dict result
:rtype: dict
"""
_cmd = "rados bench -p {} {} {}".format(
self.pool, self.time_in_secs, action)
if params:
_cmd += " "
_cmd += " ".join(params)
logging.info(
"Running '{}' for {} seconds ...".format(_cmd, self.time_in_secs))
_result = zaza.model.run_on_leader(
"ceph-mon", _cmd, timeout=self.time_in_secs + 60)
return _result
def test_001_create_pool(self):
"""Create ceph pool."""
_cmd = "ceph osd pool create {} 100 100".format(self.pool)
_result = zaza.model.run_on_leader(
"ceph-mon", _cmd)
if _result.get("Code") and not _result.get("Code").startswith('0'):
if "already exists" in _result.get("Stderr", ""):
logging.warning(
"Ceph osd pool {} already exits.".format(self.pool))
else:
logging.error("Ceph osd pool create failed")
raise Exception(_result.get("Stderr", ""))
def test_100_rados_bench_write(self):
"""Rados bench write test."""
_result = self.run_rados_bench("write", params=["--no-cleanup"])
self.test_results["write"] = (
self.parse_bench_results(_result.get("Stdout", "")))
def test_200_rados_bench_read_seq(self):
"""Rados bench read sequential test."""
_result = self.run_rados_bench("seq")
self.test_results["read_seq"] = (
self.parse_bench_results(_result.get("Stdout", "")))
def test_300_rados_bench_read_rand(self):
"""Rados bench read random test."""
_result = self.run_rados_bench("rand")
self.test_results["read_rand"] = (
self.parse_bench_results(_result.get("Stdout", "")))
def test_998_rados_cleanup(self):
"""Cleanup rados bench data."""
_cmd = "rados -p {} cleanup".format(self.pool)
_result = zaza.model.run_on_leader("ceph-mon", _cmd)
if _result.get("Code") and not _result.get("Code").startswith('0'):
logging.warning("rados cleanup failed")
def test_999_print_rados_bench_results(self):
"""Print rados bench results."""
print("######## Begin Ceph Results ########")
for test, results in self.test_results.items():
print("##### {} ######".format(test))
for key, value in results.items():
print("{}: {}".format(key, value))
print("######## End Ceph Results ########")

View File

@@ -14,7 +14,23 @@
"""Setup for ceph-osd deployments."""
import logging
import zaza.model
def basic_setup():
    """Run basic setup for ceph-osd."""
    # No per-deployment setup is currently required for ceph-osd.
    pass
def ceph_ready():
    """Wait for ceph to be ready.

    Wait for ceph to be ready. This is useful if the target_deploy_status in
    the tests.yaml is expecting ceph to be in a blocked state. After ceph
    has been unblocked the deploy may need to wait for ceph to be ready.
    """
    logging.info("Waiting for ceph units to settle")
    # Wait for workload states to match expectations first, then for all
    # units to stop executing hooks.
    zaza.model.wait_for_application_states()
    zaza.model.block_until_all_units_idle()
    logging.info("Ceph units settled")

View File

@@ -544,7 +544,7 @@ class CephRGWTest(test_utils.OpenStackBaseTest):
@classmethod
def setUpClass(cls):
"""Run class setup for running ceph low level tests."""
super(CephRGWTest, cls).setUpClass()
super(CephRGWTest, cls).setUpClass(application_name='ceph-radosgw')
@property
def expected_apps(self):
@@ -622,7 +622,9 @@ class CephRGWTest(test_utils.OpenStackBaseTest):
'multisite configuration')
logging.info('Checking Swift REST API')
keystone_session = zaza_openstack.get_overcloud_keystone_session()
region_name = 'RegionOne'
region_name = zaza_model.get_application_config(
self.application_name,
model_name=self.model_name)['region']['value']
swift_client = zaza_openstack.get_swift_session_client(
keystone_session,
region_name,
@@ -792,6 +794,50 @@ class CephPrometheusTest(unittest.TestCase):
'3', _get_mon_count_from_prometheus(unit.public_address))
class CephPoolConfig(Exception):
    """Custom Exception for bad Ceph pool config."""
class CheckPoolTypes(unittest.TestCase):
    """Test the ceph pools created for clients are of the expected type."""

    def test_check_pool_types(self):
        """Check type of pools created for clients.

        For each (application, pool) pair, compare the pool type reported
        by ceph at runtime against the charm's ``pool-type`` config option
        (defaulting to replicated when the option is absent).
        """
        app_pools = [
            ('glance', 'glance'),
            ('nova-compute', 'nova'),
            ('cinder-ceph', 'cinder-ceph')]
        runtime_pool_details = zaza_ceph.get_ceph_pool_details()
        for app, pool_name in app_pools:
            juju_pool_config = zaza_model.get_application_config(app).get(
                'pool-type')
            if juju_pool_config:
                expected_pool_type = juju_pool_config['value']
            else:
                # If the pool-type option is absent assume the default of
                # replicated.
                expected_pool_type = zaza_ceph.REPLICATED_POOL_TYPE
            for pool_config in runtime_pool_details:
                if pool_config['pool_name'] == pool_name:
                    logging.info('Checking {} is {}'.format(
                        pool_name,
                        expected_pool_type))
                    # -1 deliberately matches no valid pool code, so an
                    # unrecognised expected type fails the assertion below.
                    expected_pool_code = -1
                    if expected_pool_type == zaza_ceph.REPLICATED_POOL_TYPE:
                        expected_pool_code = zaza_ceph.REPLICATED_POOL_CODE
                    elif expected_pool_type == zaza_ceph.ERASURE_POOL_TYPE:
                        expected_pool_code = zaza_ceph.ERASURE_POOL_CODE
                    self.assertEqual(
                        pool_config['type'],
                        expected_pool_code)
                    break
            else:
                # The inner loop found no runtime details for this pool.
                raise CephPoolConfig(
                    "Failed to find config for {}".format(pool_name))
# NOTE: We might query before prometheus has fetch data
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1,
min=5, max=10),

View File

@@ -53,8 +53,11 @@ class FullCloudCharmUpgradeTest(unittest.TestCase):
"""Run charm upgrade."""
self.lts.test_launch_small_instance()
applications = zaza.model.get_status().applications
groups = upgrade_utils.get_charm_upgrade_groups()
for group_name, group in groups.items():
groups = upgrade_utils.get_charm_upgrade_groups(
extra_filters=[upgrade_utils._filter_etcd,
upgrade_utils._filter_easyrsa,
upgrade_utils._filter_memcached])
for group_name, group in groups:
logging.info("About to upgrade {} ({})".format(group_name, group))
for application, app_details in applications.items():
if application not in group:

View File

@@ -97,7 +97,7 @@ class CinderBackupTest(test_utils.OpenStackBaseTest):
self.cinder_client.volumes,
cinder_vol.id,
wait_iteration_max_time=180,
stop_after_attempt=15,
stop_after_attempt=30,
expected_status='available',
msg='Volume status wait')
@@ -109,7 +109,7 @@ class CinderBackupTest(test_utils.OpenStackBaseTest):
self.cinder_client.backups,
vol_backup.id,
wait_iteration_max_time=180,
stop_after_attempt=15,
stop_after_attempt=30,
expected_status='available',
msg='Volume status wait')
# Delete the volume

View File

@@ -16,6 +16,7 @@
import logging
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.utilities.deployment_env as deployment_env
CIRROS_IMAGE_NAME = "cirros"
CIRROS_ALT_IMAGE_NAME = "cirros_alt"
@@ -31,7 +32,8 @@ def basic_setup():
"""
def add_image(image_url, glance_client=None, image_name=None, tags=[]):
def add_image(image_url, glance_client=None, image_name=None, tags=[],
properties=None):
"""Retrieve image from ``image_url`` and add it to glance.
:param image_url: Retrievable URL with image data
@@ -42,6 +44,8 @@ def add_image(image_url, glance_client=None, image_name=None, tags=[]):
:type image_name: str
:param tags: List of tags to add to image
:type tags: list of str
:param properties: Properties to add to image
:type properties: dict
"""
if not glance_client:
keystone_session = openstack_utils.get_overcloud_keystone_session()
@@ -60,7 +64,8 @@ def add_image(image_url, glance_client=None, image_name=None, tags=[]):
glance_client,
image_url,
image_name,
tags=tags)
tags=tags,
properties=properties)
def add_cirros_image(glance_client=None, image_name=None):
@@ -90,7 +95,8 @@ def add_cirros_alt_image(glance_client=None, image_name=None):
add_cirros_image(glance_client, image_name)
def add_lts_image(glance_client=None, image_name=None, release=None):
def add_lts_image(glance_client=None, image_name=None, release=None,
properties=None):
"""Add an Ubuntu LTS image to the current deployment.
:param glance: Authenticated glanceclient
@@ -99,12 +105,22 @@ def add_lts_image(glance_client=None, image_name=None, release=None):
:type image_name: str
:param release: Name of ubuntu release.
:type release: str
:param properties: Custom image properties
:type properties: dict
"""
deploy_ctxt = deployment_env.get_deployment_context()
image_arch = deploy_ctxt.get('TEST_IMAGE_ARCH', 'amd64')
arch_image_properties = {
'arm64': {'hw_firmware_type': 'uefi'},
'ppc64el': {'architecture': 'ppc64'}}
properties = properties or arch_image_properties.get(image_arch)
logging.info("Image architecture set to {}".format(image_arch))
image_name = image_name or LTS_IMAGE_NAME
release = release or LTS_RELEASE
image_url = openstack_utils.find_ubuntu_image(
release=release,
arch='amd64')
arch=image_arch)
add_image(image_url,
glance_client=glance_client,
image_name=image_name)
image_name=image_name,
properties=properties)

View File

@@ -16,12 +16,15 @@
"""Encapsulate Gnocchi testing."""
import base64
import boto3
import logging
import pprint
from gnocchiclient.v1 import client as gnocchi_client
import zaza.model as model
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities as utilities
import zaza.openstack.utilities.openstack as openstack_utils
@@ -73,7 +76,7 @@ class GnocchiS3Test(test_utils.OpenStackBaseTest):
session = openstack_utils.get_overcloud_keystone_session()
ks_client = openstack_utils.get_keystone_session_client(session)
# Get token data so we can glean our user_id and project_id
# Get token data so we can clean our user_id and project_id
token_data = ks_client.tokens.get_token_data(session.get_token())
project_id = token_data['token']['project']['id']
user_id = token_data['token']['user']['id']
@@ -110,3 +113,32 @@ class GnocchiS3Test(test_utils.OpenStackBaseTest):
break
else:
AssertionError('Bucket "{}" not found'.format(gnocchi_bkt))
class GnocchiExternalCATest(test_utils.OpenStackBaseTest):
"""Test Gnocchi for external root CA config option."""
def test_upload_external_cert(self):
"""Verify that the external CA is uploaded correctly."""
logging.info('Changing value for trusted-external-ca-cert.')
ca_cert_option = 'trusted-external-ca-cert'
ppk, cert = utilities.cert.generate_cert('gnocchi_test.ci.local')
b64_cert = base64.b64encode(cert).decode()
config = {
ca_cert_option: b64_cert,
}
model.set_application_config(
'gnocchi',
config
)
model.block_until_all_units_idle()
files = [
'/usr/local/share/ca-certificates/gnocchi-external.crt',
'/etc/ssl/certs/gnocchi-external.pem',
]
for file in files:
logging.info("Validating that {} is created.".format(file))
model.block_until_file_has_contents('gnocchi', file, 'CERTIFICATE')
logging.info("Found {} successfully.".format(file))

View File

@@ -14,8 +14,12 @@
"""Code for setting up keystone."""
import logging
import keystoneauth1
import zaza.charm_lifecycle.utils as lifecycle_utils
import zaza.model
import zaza.openstack.utilities.openstack as openstack_utils
from zaza.openstack.charm_tests.keystone import (
BaseKeystoneTest,
@@ -30,6 +34,25 @@ from zaza.openstack.charm_tests.keystone import (
)
def wait_for_cacert(model_name=None):
    """Wait for keystone to install a cacert.

    :param model_name: Name of model to query.
    :type model_name: str
    """
    logging.info("Waiting for cacert")
    # Block until the CA certificate file lands on the keystone units,
    # then wait for the model to settle into its expected states.
    zaza.model.block_until_file_has_contents(
        'keystone',
        openstack_utils.KEYSTONE_REMOTE_CACERT,
        'CERTIFICATE',
        model_name=model_name)
    zaza.model.block_until_all_units_idle(model_name=model_name)
    test_config = lifecycle_utils.get_charm_config(fatal=False)
    zaza.model.wait_for_application_states(
        states=test_config.get('target_deploy_status', {}),
        model_name=model_name)
def add_demo_user():
"""Add a demo user to the current deployment."""
def _v2():

View File

@@ -21,7 +21,7 @@ import keystoneauth1
import zaza.model
import zaza.openstack.utilities.exceptions as zaza_exceptions
import zaza.openstack.utilities.juju as juju_utils
import zaza.utilities.juju as juju_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.charm_lifecycle.utils as lifecycle_utils
import zaza.openstack.charm_tests.test_utils as test_utils
@@ -262,6 +262,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
openrc['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT
openrc['OS_AUTH_URL'] = (
openrc['OS_AUTH_URL'].replace('http', 'https'))
logging.info('keystone IP {}'.format(ip))
keystone_session = openstack_utils.get_keystone_session(
openrc)
keystone_client = openstack_utils.get_keystone_session_client(
@@ -319,10 +320,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
'OS_PROJECT_DOMAIN_NAME': DEMO_DOMAIN,
'OS_PROJECT_NAME': DEMO_PROJECT,
}
with self.config_change(
{'preferred-api-version': self.default_api_version},
{'preferred-api-version': self.api_v3},
application_name="keystone"):
with self.v3_keystone_preferred():
for ip in self.keystone_ips:
openrc.update(
{'OS_AUTH_URL': 'http://{}:5000/v3'.format(ip)})

View File

@@ -134,6 +134,26 @@ class MasakariTest(test_utils.OpenStackBaseTest):
vm_uuid,
model_name=self.model_name)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=2, max=60),
reraise=True, stop=tenacity.stop_after_attempt(5),
retry=tenacity.retry_if_exception_type(AssertionError))
def wait_for_guest_ready(self, vm_name):
"""Wait for the guest to be ready.
:param vm_name: Name of guest to check.
:type vm_name: str
"""
guest_ready_attr_checks = [
('OS-EXT-STS:task_state', None),
('status', 'ACTIVE'),
('OS-EXT-STS:power_state', 1),
('OS-EXT-STS:vm_state', 'active')]
guest = self.nova_client.servers.find(name=vm_name)
logging.info('Checking guest {} attributes'.format(vm_name))
for (attr, required_state) in guest_ready_attr_checks:
logging.info('Checking {} is {}'.format(attr, required_state))
assert getattr(guest, attr) == required_state
def test_instance_failover(self):
"""Test masakari managed guest migration."""
# Workaround for Bug #1874719
@@ -168,6 +188,7 @@ class MasakariTest(test_utils.OpenStackBaseTest):
model_name=self.model_name)
openstack_utils.enable_all_nova_services(self.nova_client)
zaza.openstack.configure.masakari.enable_hosts()
self.wait_for_guest_ready(vm_name)
def test_instance_restart_on_fail(self):
"""Test single guest crash and recovery."""
@@ -178,6 +199,7 @@ class MasakariTest(test_utils.OpenStackBaseTest):
self.current_release))
vm_name = 'zaza-test-instance-failover'
vm = self.ensure_guest(vm_name)
self.wait_for_guest_ready(vm_name)
_, unit_name = self.get_guests_compute_info(vm_name)
logging.info('{} is running on {}'.format(vm_name, unit_name))
guest_pid = self.get_guest_qemu_pid(

View File

@@ -29,6 +29,9 @@ import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.utilities.generic as generic_utils
PXC_SEEDED_FILE = "/var/lib/percona-xtradb-cluster/seeded"
class MySQLBaseTest(test_utils.OpenStackBaseTest):
"""Base for mysql charm tests."""
@@ -149,7 +152,7 @@ class MySQLCommonTests(MySQLBaseTest):
set_alternate = {"max-connections": "1000"}
# Make config change, check for service restarts
logging.debug("Setting max connections ...")
logging.info("Setting max connections ...")
self.restart_on_changed(
self.conf_file,
set_default,
@@ -198,7 +201,7 @@ class PerconaClusterBaseTest(MySQLBaseTest):
output = zaza.model.run_on_leader(
self.application, cmd)["Stdout"].strip()
value = re.search(r"^.+?\s+(.+)", output).group(1)
logging.debug("%s = %s" % (attr, value))
logging.info("%s = %s" % (attr, value))
return value
def is_pxc_bootstrapped(self):
@@ -236,7 +239,7 @@ class PerconaClusterBaseTest(MySQLBaseTest):
cmd = "ip -br addr"
result = zaza.model.run_on_unit(unit.entity_id, cmd)
output = result.get("Stdout").strip()
logging.debug(output)
logging.info(output)
if self.vip in output:
logging.info("vip ({}) running in {}".format(
self.vip,
@@ -265,6 +268,10 @@ class PerconaClusterCharmTests(MySQLCommonTests, PerconaClusterBaseTest):
" (wanted=%s, cluster_size=%s)" % (self.units, cluster_size))
assert cluster_size >= self.units, msg
logging.info("Ensuring PXC seeded file is present")
zaza.model.block_until_file_has_contents(self.application,
PXC_SEEDED_FILE, "done")
def test_130_change_root_password(self):
"""Change root password.
@@ -333,12 +340,12 @@ class PerconaClusterColdStartTest(PerconaClusterBaseTest):
juju_utils.get_machine_uuids_for_application(self.application))
# Stop Nodes
# Avoid hitting an update-status hook
logging.debug("Wait till model is idle ...")
logging.info("Wait till model is idle ...")
zaza.model.block_until_all_units_idle()
logging.info("Stopping instances: {}".format(_machines))
for uuid in _machines:
self.nova_client.servers.stop(uuid)
logging.debug("Wait till all machines are shutoff ...")
logging.info("Wait till all machines are shutoff ...")
for uuid in _machines:
openstack_utils.resource_reaches_status(self.nova_client.servers,
uuid,
@@ -357,7 +364,7 @@ class PerconaClusterColdStartTest(PerconaClusterBaseTest):
'unknown',
negate_match=True)
logging.debug("Wait till model is idle ...")
logging.info("Wait till model is idle ...")
# XXX If a hook was executing on a unit when it was powered off
# it comes back in an error state.
try:
@@ -366,7 +373,7 @@ class PerconaClusterColdStartTest(PerconaClusterBaseTest):
self.resolve_update_status_errors()
zaza.model.block_until_all_units_idle()
logging.debug("Wait for application states ...")
logging.info("Wait for application states ...")
for unit in zaza.model.get_units(self.application):
try:
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
@@ -389,7 +396,7 @@ class PerconaClusterColdStartTest(PerconaClusterBaseTest):
_non_leaders[0],
"bootstrap-pxc",
action_params={})
logging.debug("Wait for application states ...")
logging.info("Wait for application states ...")
for unit in zaza.model.get_units(self.application):
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
states = {"percona-cluster": {
@@ -403,7 +410,7 @@ class PerconaClusterColdStartTest(PerconaClusterBaseTest):
self.application,
"notify-bootstrapped",
action_params={})
logging.debug("Wait for application states ...")
logging.info("Wait for application states ...")
for unit in zaza.model.get_units(self.application):
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
test_config = lifecycle_utils.get_charm_config(fatal=False)
@@ -521,7 +528,7 @@ class MySQLInnoDBClusterColdStartTest(MySQLBaseTest):
zaza.model.resolve_units(
application_name=self.application,
erred_hook='update-status',
wait=True)
wait=True, timeout=180)
def test_100_reboot_cluster_from_complete_outage(self):
"""Reboot cluster from complete outage.
@@ -532,12 +539,12 @@ class MySQLInnoDBClusterColdStartTest(MySQLBaseTest):
juju_utils.get_machine_uuids_for_application(self.application))
# Stop Nodes
# Avoid hitting an update-status hook
logging.debug("Wait till model is idle ...")
logging.info("Wait till model is idle ...")
zaza.model.block_until_all_units_idle()
logging.info("Stopping instances: {}".format(_machines))
for uuid in _machines:
self.nova_client.servers.stop(uuid)
logging.debug("Wait till all machines are shutoff ...")
logging.info("Wait till all machines are shutoff ...")
for uuid in _machines:
openstack_utils.resource_reaches_status(self.nova_client.servers,
uuid,
@@ -550,38 +557,37 @@ class MySQLInnoDBClusterColdStartTest(MySQLBaseTest):
for uuid in _machines:
self.nova_client.servers.start(uuid)
logging.info(
"Wait till all {} units are in state 'unknown' ..."
.format(self.application))
for unit in zaza.model.get_units(self.application):
zaza.model.block_until_unit_wl_status(
unit.entity_id,
'unknown',
negate_match=True)
logging.debug("Wait till model is idle ...")
logging.info("Wait till model is idle ...")
try:
zaza.model.block_until_all_units_idle()
except zaza.model.UnitError:
self.resolve_update_status_errors()
zaza.model.block_until_all_units_idle()
logging.debug("Clear error hooks after reboot ...")
logging.info("Clear error hooks after reboot ...")
for unit in zaza.model.get_units(self.application):
try:
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
except zaza.model.UnitError:
self.resolve_update_status_errors()
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
logging.debug("Wait for application states blocked ...")
states = {
self.application: {
"workload-status": "blocked",
"workload-status-message":
"MySQL InnoDB Cluster not healthy: None"},
"mysql-router": {
"workload-status": "blocked",
"workload-status-message":
"Failed to connect to MySQL"}}
zaza.model.wait_for_application_states(states=states)
logging.info(
"Wait till all {} units are in state 'blocked' ..."
.format(self.application))
for unit in zaza.model.get_units(self.application):
zaza.model.block_until_unit_wl_status(
unit.entity_id,
'blocked')
logging.info("Execute reboot-cluster-from-complete-outage "
"action after cold boot ...")
@@ -592,15 +598,15 @@ class MySQLInnoDBClusterColdStartTest(MySQLBaseTest):
unit.entity_id,
"reboot-cluster-from-complete-outage",
action_params={})
if "Success" in action.data["results"].get("outcome"):
if "Success" in action.data.get("results", {}).get("outcome", ""):
break
else:
logging.info(action.data["results"].get("output"))
logging.info(action.data.get("results", {}).get("output", ""))
assert "Success" in action.data["results"]["outcome"], (
"Reboot cluster from complete outage action failed: {}"
.format(action.data))
logging.debug("Wait for application states ...")
logging.info("Wait for application states ...")
for unit in zaza.model.get_units(self.application):
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
test_config = lifecycle_utils.get_charm_config(fatal=False)

View File

@@ -290,19 +290,31 @@ class NeutronCreateNetworkTest(test_utils.OpenStackBaseTest):
def test_400_create_network(self):
"""Create a network, verify that it exists, and then delete it."""
self._wait_for_neutron_ready()
self._assert_test_network_doesnt_exist()
self._create_test_network()
net_id = self._assert_test_network_exists_and_return_id()
self._delete_test_network(net_id)
self._assert_test_network_doesnt_exist()
@classmethod
def _wait_for_neutron_ready(cls):
logging.info('Waiting for Neutron to become ready...')
zaza.model.wait_for_application_states()
for attempt in tenacity.Retrying(
wait=tenacity.wait_fixed(5), # seconds
stop=tenacity.stop_after_attempt(12),
reraise=True):
with attempt:
cls.neutron_client.list_networks()
def _create_test_network(self):
logging.debug('Creating neutron network...')
logging.info('Creating neutron network...')
network = {'name': self._TEST_NET_NAME}
self.neutron_client.create_network({'network': network})
def _delete_test_network(self, net_id):
logging.debug('Deleting neutron network...')
logging.info('Deleting neutron network...')
self.neutron_client.delete_network(net_id)
def _assert_test_network_exists_and_return_id(self):
@@ -608,27 +620,10 @@ class NeutronNetworkingBase(test_utils.OpenStackBaseTest):
@classmethod
def setUpClass(cls):
"""Run class setup for running Neutron API Networking tests."""
super(NeutronNetworkingBase, cls).setUpClass()
super(NeutronNetworkingBase, cls).setUpClass(
application_name='neutron-api')
cls.neutron_client = (
openstack_utils.get_neutron_session_client(cls.keystone_session))
# NOTE(fnordahl): in the event of a test failure we do not want to run
# tear down code as it will make debugging a problem virtually
# impossible. To alleviate each test method will set the
# `run_tearDown` instance variable at the end which will let us run
# tear down only when there were no failure.
cls.run_tearDown = False
@classmethod
def tearDown(cls):
"""Remove test resources."""
if cls.run_tearDown:
logging.info('Running teardown')
for server in cls.nova_client.servers.list():
if server.name.startswith(cls.RESOURCE_PREFIX):
openstack_utils.delete_resource(
cls.nova_client.servers,
server.id,
msg="server")
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
reraise=True, stop=tenacity.stop_after_attempt(8))
@@ -798,7 +793,7 @@ class NeutronNetworkingTest(NeutronNetworkingBase):
self.launch_guests()
instance_1, instance_2 = self.retrieve_guests()
self.check_connectivity(instance_1, instance_2)
self.run_tearDown = True
self.run_resource_cleanup = True
class NeutronNetworkingVRRPTests(NeutronNetworkingBase):

View File

@@ -29,25 +29,40 @@ class NeutronCreateAristaNetworkTest(neutron_tests.NeutronCreateNetworkTest):
def setUpClass(cls):
"""Run class setup for running Neutron Arista tests."""
super(NeutronCreateAristaNetworkTest, cls).setUpClass()
cls._wait_for_neutron_ready()
logging.info('Waiting for Neutron to become ready...')
def _assert_test_network_exists_and_return_id(self):
logging.info('Checking that the test network exists on the Arista '
'test fixture...')
# Sometimes the API call from Neutron to Arista fails and Neutron
# retries a couple of seconds later, which is why the newly created
# test network may not be immediately visible on Arista's API.
# NOTE(lourot): I experienced a run where it took 53 seconds.
for attempt in tenacity.Retrying(
wait=tenacity.wait_fixed(5), # seconds
wait=tenacity.wait_fixed(10), # seconds
stop=tenacity.stop_after_attempt(12),
reraise=True):
with attempt:
cls.neutron_client.list_networks()
actual_network_names = arista_utils.query_fixture_networks(
arista_utils.fixture_ip_addr())
self.assertEqual(actual_network_names, [self._TEST_NET_NAME])
def _assert_test_network_exists_and_return_id(self):
actual_network_names = arista_utils.query_fixture_networks(
arista_utils.fixture_ip_addr())
self.assertEqual(actual_network_names, [self._TEST_NET_NAME])
return super(NeutronCreateAristaNetworkTest,
self)._assert_test_network_exists_and_return_id()
def _assert_test_network_doesnt_exist(self):
actual_network_names = arista_utils.query_fixture_networks(
arista_utils.fixture_ip_addr())
self.assertEqual(actual_network_names, [])
logging.info("Checking that the test network doesn't exist on the "
"Arista test fixture...")
for attempt in tenacity.Retrying(
wait=tenacity.wait_fixed(10), # seconds
stop=tenacity.stop_after_attempt(12),
reraise=True):
with attempt:
actual_network_names = arista_utils.query_fixture_networks(
arista_utils.fixture_ip_addr())
self.assertEqual(actual_network_names, [])
super(NeutronCreateAristaNetworkTest,
self)._assert_test_network_doesnt_exist()

View File

@@ -56,6 +56,16 @@ class LTSGuestCreateTest(BaseGuestCreateTest):
glance_setup.LTS_IMAGE_NAME)
class LTSGuestCreateVolumeBackedTest(BaseGuestCreateTest):
    """Tests to launch a volume-backed LTS image."""

    def test_launch_small_instance(self):
        """Launch a volume-backed LTS instance and test connectivity."""
        zaza.openstack.configure.guest.launch_instance(
            glance_setup.LTS_IMAGE_NAME,
            use_boot_volume=True)
class NovaCompute(test_utils.OpenStackBaseTest):
"""Run nova-compute specific tests."""

View File

@@ -98,25 +98,6 @@ def configure_octavia():
pass
def prepare_payload_instance():
    """Prepare an instance we can use as a payload for tests.

    Opens TCP port 80 for the admin project via a Neutron security group
    rule, then launches an LTS guest whose cloud-init userdata installs
    Apache, so the guest serves HTTP once booted.
    """
    session = openstack.get_overcloud_keystone_session()
    keystone = openstack.get_keystone_session_client(session)
    neutron = openstack.get_neutron_session_client(session)
    project_id = openstack.get_project_id(
        keystone, 'admin', domain_name='admin_domain')
    # Allow ingress HTTP so the payload web server is reachable.
    openstack.add_neutron_secgroup_rules(
        neutron,
        project_id,
        [{'protocol': 'tcp',
          'port_range_min': '80',
          'port_range_max': '80',
          'direction': 'ingress'}])
    # The cloud-config userdata installs apache2 in the guest at boot.
    zaza.openstack.configure.guest.launch_instance(
        glance_setup.LTS_IMAGE_NAME,
        userdata='#cloud-config\npackages:\n - apache2\n')
def centralized_fip_network():
"""Create network with centralized router for connecting lb and fips.

View File

@@ -38,7 +38,20 @@ class CharmOperationTest(test_utils.OpenStackBaseTest):
Pause service and check services are stopped, then resume and check
they are started.
"""
self.pause_resume(['apache2'])
services = [
'apache2',
'octavia-health-manager',
'octavia-housekeeping',
'octavia-worker',
]
if openstack_utils.ovn_present():
services.append('octavia-driver-agent')
logging.info('Skipping pause resume test LP: #1886202...')
return
logging.info('Testing pause resume (services="{}")'
.format(services))
with self.pause_resume(services, pgrep_full=True):
pass
class LBAASv2Test(test_utils.OpenStackBaseTest):
@@ -48,12 +61,13 @@ class LBAASv2Test(test_utils.OpenStackBaseTest):
def setUpClass(cls):
"""Run class setup for running LBaaSv2 service tests."""
super(LBAASv2Test, cls).setUpClass()
cls.keystone_session = openstack_utils.get_overcloud_keystone_session()
cls.keystone_client = openstack_utils.get_keystone_session_client(
cls.keystone_session)
cls.neutron_client = openstack_utils.get_neutron_session_client(
cls.keystone_session)
cls.octavia_client = openstack_utils.get_octavia_session_client(
cls.keystone_session)
cls.RESOURCE_PREFIX = 'zaza-octavia'
# NOTE(fnordahl): in the event of a test failure we do not want to run
# tear down code as it will make debugging a problem virtually
@@ -63,28 +77,24 @@ class LBAASv2Test(test_utils.OpenStackBaseTest):
cls.run_tearDown = False
# List of load balancers created by this test
cls.loadbalancers = []
# LIst of floating IPs created by this test
# List of floating IPs created by this test
cls.fips = []
@classmethod
def tearDown(cls):
"""Remove resources created during test execution.
Note that resources created in the configure step prior to executing
the test should not be touched here.
"""
if not cls.run_tearDown:
return
for lb in cls.loadbalancers:
cls.octavia_client.load_balancer_delete(lb['id'], cascade=True)
def resource_cleanup(self):
"""Remove resources created during test execution."""
for lb in self.loadbalancers:
self.octavia_client.load_balancer_delete(lb['id'], cascade=True)
try:
cls.wait_for_lb_resource(
cls.octavia_client.load_balancer_show, lb['id'],
self.wait_for_lb_resource(
self.octavia_client.load_balancer_show, lb['id'],
provisioning_status='DELETED')
except osc_lib.exceptions.NotFound:
pass
for fip in cls.fips:
cls.neutron_client.delete_floatingip(fip)
for fip in self.fips:
self.neutron_client.delete_floatingip(fip)
# we run the parent resource_cleanup last as it will remove instances
# referenced as members in the above cleaned up load balancers
super(LBAASv2Test, self).resource_cleanup()
@staticmethod
@tenacity.retry(retry=tenacity.retry_if_exception_type(AssertionError),
@@ -238,12 +248,27 @@ class LBAASv2Test(test_utils.OpenStackBaseTest):
def test_create_loadbalancer(self):
"""Create load balancer."""
nova_client = openstack_utils.get_nova_session_client(
self.keystone_session)
# Prepare payload instances
# First we allow communication to port 80 by adding a security group
# rule
project_id = openstack_utils.get_project_id(
self.keystone_client, 'admin', domain_name='admin_domain')
openstack_utils.add_neutron_secgroup_rules(
self.neutron_client,
project_id,
[{'protocol': 'tcp',
'port_range_min': '80',
'port_range_max': '80',
'direction': 'ingress'}])
# Then we request two Ubuntu instances with the Apache web server
# installed
instance_1, instance_2 = self.launch_guests(
userdata='#cloud-config\npackages:\n - apache2\n')
# Get IP of the prepared payload instances
payload_ips = []
for server in nova_client.servers.list():
for server in (instance_1, instance_2):
payload_ips.append(server.networks['private'][0])
self.assertTrue(len(payload_ips) > 0)
@@ -274,4 +299,4 @@ class LBAASv2Test(test_utils.OpenStackBaseTest):
lb_fp['floating_ip_address']))
# If we get here, it means the tests passed
self.run_tearDown = True
self.run_resource_cleanup = True

View File

@@ -401,6 +401,10 @@ class BasePolicydSpecialization(PolicydTest,
def test_003_test_overide_is_observed(self):
"""Test that the override is observed by the underlying service."""
if (openstack_utils.get_os_release() <
openstack_utils.get_os_release('groovy_victoria')):
raise unittest.SkipTest(
"Test skipped until Bug #1880959 is fix released")
if self._test_name is None:
logging.info("Doing policyd override for {}"
.format(self._service_name))
@@ -655,7 +659,7 @@ class HeatTests(BasePolicydSpecialization):
class OctaviaTests(BasePolicydSpecialization):
"""Test the policyd override using the octavia client."""
_rule = {'rule.yaml': "{'os_load-balancer_api:loadbalancer:get_one': '!'}"}
_rule = {'rule.yaml': "{'os_load-balancer_api:provider:get_all': '!'}"}
@classmethod
def setUpClass(cls, application_name=None):
@@ -663,89 +667,8 @@ class OctaviaTests(BasePolicydSpecialization):
super(OctaviaTests, cls).setUpClass(application_name="octavia")
cls.application_name = "octavia"
def setup_for_attempt_operation(self, ip):
"""Create a loadbalancer.
This is necessary so that the attempt is to show the load-balancer and
this is an operator that the policy can stop. Unfortunately, octavia,
whilst it has a policy for just listing load-balancers, unfortunately,
it doesn't work; whereas showing the load-balancer can be stopped.
NB this only works if the setup phase of the octavia tests have been
completed.
:param ip: the ip of for keystone.
:type ip: str
"""
logging.info("Setting up loadbalancer.")
auth = openstack_utils.get_overcloud_auth(address=ip)
sess = openstack_utils.get_keystone_session(auth)
octavia_client = openstack_utils.get_octavia_session_client(sess)
neutron_client = openstack_utils.get_neutron_session_client(sess)
if openstack_utils.dvr_enabled():
network_name = 'private_lb_fip_network'
else:
network_name = 'private'
resp = neutron_client.list_networks(name=network_name)
vip_subnet_id = resp['networks'][0]['subnets'][0]
res = octavia_client.load_balancer_create(
json={
'loadbalancer': {
'description': 'Created by Zaza',
'admin_state_up': True,
'vip_subnet_id': vip_subnet_id,
'name': 'zaza-lb-0',
}})
self.lb_id = res['loadbalancer']['id']
# now wait for it to get to the active state
@tenacity.retry(wait=tenacity.wait_fixed(1),
reraise=True, stop=tenacity.stop_after_delay(900))
def wait_for_lb_resource(client, resource_id):
resp = client.load_balancer_show(resource_id)
logging.info(resp['provisioning_status'])
assert resp['provisioning_status'] == 'ACTIVE', (
'load balancer resource has not reached '
'expected provisioning status: {}'
.format(resp))
return resp
logging.info('Awaiting loadbalancer to reach provisioning_status '
'"ACTIVE"')
resp = wait_for_lb_resource(octavia_client, self.lb_id)
logging.info(resp)
logging.info("Setup loadbalancer complete.")
def cleanup_for_attempt_operation(self, ip):
"""Remove the loadbalancer.
:param ip: the ip of for keystone.
:type ip: str
"""
logging.info("Deleting loadbalancer {}.".format(self.lb_id))
auth = openstack_utils.get_overcloud_auth(address=ip)
sess = openstack_utils.get_keystone_session(auth)
octavia_client = openstack_utils.get_octavia_session_client(sess)
octavia_client.load_balancer_delete(self.lb_id)
logging.info("Deleting loadbalancer in progress ...")
@tenacity.retry(wait=tenacity.wait_fixed(1),
reraise=True, stop=tenacity.stop_after_delay(900))
def wait_til_deleted(client, lb_id):
lb_list = client.load_balancer_list()
ids = [lb['id'] for lb in lb_list['loadbalancers']]
assert lb_id not in ids, 'load balancer still deleting'
wait_til_deleted(octavia_client, self.lb_id)
logging.info("Deleted loadbalancer.")
def get_client_and_attempt_operation(self, ip):
"""Attempt to show the loadbalancer as a policyd override.
"""Attempt to list available provider drivers.
This operation should pass normally, and fail when
the rule has been overriden (see the `rule` class variable.
@@ -757,6 +680,6 @@ class OctaviaTests(BasePolicydSpecialization):
octavia_client = openstack_utils.get_octavia_session_client(
self.get_keystone_session_admin_user(ip))
try:
octavia_client.load_balancer_show(self.lb_id)
octavia_client.provider_list()
except octaviaclient.OctaviaClientException:
raise PolicydOperationFailedException()

View File

@@ -21,6 +21,7 @@ import requests
import zaza.model
from zaza.openstack.charm_tests.keystone import BaseKeystoneTest
import zaza.charm_lifecycle.utils as lifecycle_utils
import zaza.openstack.utilities.openstack as openstack_utils
class FailedToReachIDP(Exception):
@@ -42,6 +43,8 @@ class CharmKeystoneSAMLMellonTest(BaseKeystoneTest):
cls.test_config = lifecycle_utils.get_charm_config()
cls.application_name = cls.test_config['charm_name']
cls.action = "get-sp-metadata"
cls.current_release = openstack_utils.get_os_release()
cls.FOCAL_USSURI = openstack_utils.get_os_release("focal_ussuri")
def test_run_get_sp_metadata_action(self):
"""Validate the get-sp-metadata action."""
@@ -92,8 +95,13 @@ class CharmKeystoneSAMLMellonTest(BaseKeystoneTest):
else:
proto = "http"
# Use Keystone URL for < Focal
if self.current_release < self.FOCAL_USSURI:
region = "{}://{}:5000/v3".format(proto, keystone_ip)
else:
region = "default"
url = "{}://{}/horizon/auth/login/".format(proto, horizon_ip)
region = "{}://{}:5000/v3".format(proto, keystone_ip)
horizon_expect = ('<option value="samltest_mapped">'
'samltest.id</option>')

View File

@@ -73,11 +73,14 @@ class ParallelSeriesUpgradeTest(unittest.TestCase):
workaround_script = None
files = []
applications = model.get_status().applications
for group_name, apps in upgrade_groups.items():
for group_name, apps in upgrade_groups:
logging.info("About to upgrade {} from {} to {}".format(
group_name, from_series, to_series))
upgrade_functions = []
if group_name in ["Stateful Services", "Data Plane", "sweep_up"]:
if group_name in ["Database Services",
"Stateful Services",
"Data Plane",
"sweep_up"]:
logging.info("Going to upgrade {} unit by unit".format(apps))
upgrade_function = \
parallel_series_upgrade.serial_series_upgrade

View File

@@ -30,22 +30,6 @@ from zaza.openstack.utilities import (
from zaza.openstack.charm_tests.nova.tests import LTSGuestCreateTest
def _filter_easyrsa(app, app_config, model_name=None):
charm_name = upgrade_utils.extract_charm_name_from_url(app_config['charm'])
if "easyrsa" in charm_name:
logging.warn("Skipping series upgrade of easyrsa Bug #1850121")
return True
return False
def _filter_etcd(app, app_config, model_name=None):
charm_name = upgrade_utils.extract_charm_name_from_url(app_config['charm'])
if "etcd" in charm_name:
logging.warn("Skipping series upgrade of easyrsa Bug #1850124")
return True
return False
class SeriesUpgradeTest(unittest.TestCase):
"""Class to encapsulate Series Upgrade Tests."""
@@ -75,7 +59,7 @@ class SeriesUpgradeTest(unittest.TestCase):
continue
if "etcd" in app_details["charm"]:
logging.warn(
"Skipping series upgrade of easyrsa Bug #1850124")
"Skipping series upgrade of etcd Bug #1850124")
continue
charm_name = upgrade_utils.extract_charm_name_from_url(
app_details['charm'])
@@ -208,10 +192,11 @@ class ParallelSeriesUpgradeTest(unittest.TestCase):
# Set Feature Flag
os.environ["JUJU_DEV_FEATURE_FLAGS"] = "upgrade-series"
upgrade_groups = upgrade_utils.get_series_upgrade_groups(
extra_filters=[_filter_etcd, _filter_easyrsa])
extra_filters=[upgrade_utils._filter_etcd,
upgrade_utils._filter_easyrsa])
applications = model.get_status().applications
completed_machines = []
for group_name, group in upgrade_groups.items():
for group_name, group in upgrade_groups:
logging.warn("About to upgrade {} ({})".format(group_name, group))
upgrade_group = []
for application, app_details in applications.items():

View File

@@ -16,11 +16,12 @@
import jinja2
import urllib.parse
import subprocess
import os
import zaza.utilities.deployment_env as deployment_env
import zaza.openstack.utilities.juju as juju_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.tempest.utils as tempest_utils
import zaza.openstack.charm_tests.glance.setup as glance_setup
SETUP_ENV_VARS = {
@@ -235,6 +236,9 @@ def get_tempest_context():
'cinder': add_cinder_config,
'keystone': add_keystone_config}
ctxt['enabled_services'] = get_service_list(keystone_session)
if set(['cinderv2', 'cinderv3']) \
.intersection(set(ctxt['enabled_services'])):
ctxt['enabled_services'].append('cinder')
ctxt['disabled_services'] = list(
set(TEMPEST_SVC_LIST) - set(ctxt['enabled_services']))
add_application_ips(ctxt)
@@ -276,21 +280,15 @@ def setup_tempest(tempest_template, accounts_template):
:returns: None
:rtype: None
"""
try:
subprocess.check_call(['tempest', 'workspace', 'remove', '--rmdir',
'--name', 'tempest-workspace'])
except subprocess.CalledProcessError:
pass
try:
subprocess.check_call(['tempest', 'init', 'tempest-workspace'])
except subprocess.CalledProcessError:
pass
workspace_name, workspace_path = tempest_utils.get_workspace()
tempest_utils.destroy_workspace(workspace_name, workspace_path)
tempest_utils.init_workspace(workspace_path)
render_tempest_config(
'tempest-workspace/etc/tempest.conf',
os.path.join(workspace_path, 'etc/tempest.conf'),
get_tempest_context(),
tempest_template)
render_tempest_config(
'tempest-workspace/etc/accounts.yaml',
os.path.join(workspace_path, 'etc/accounts.yaml'),
get_tempest_context(),
accounts_template)

View File

@@ -34,7 +34,7 @@ attach_encrypted_volume = false
{% if 'keystone' in enabled_services %}
[identity]
uri = {proto}://{{ keystone }}:5000/v2.0
uri = {{ proto }}://{{ keystone }}:5000/v2.0
auth_version = v2
admin_role = Admin
region = RegionOne

View File

@@ -20,6 +20,7 @@ import subprocess
import zaza
import zaza.charm_lifecycle.utils
import zaza.charm_lifecycle.test
import zaza.openstack.charm_tests.tempest.utils as tempest_utils
import tempfile
@@ -33,21 +34,24 @@ class TempestTest():
Test keys are parsed from ['tests_options']['tempest']['model'], where
valid test keys are: smoke (bool), whitelist (list of tests), blacklist
(list of tests), and regex (list of regex's).
(list of tests), regex (list of regex's), and keep-workspace (bool).
:returns: Status of tempest run
:rtype: bool
"""
result = True
charm_config = zaza.charm_lifecycle.utils.get_charm_config()
workspace_name, workspace_path = tempest_utils.get_workspace()
tempest_options = ['tempest', 'run', '--workspace',
'tempest-workspace', '--config',
'tempest-workspace/etc/tempest.conf']
workspace_name, '--config',
os.path.join(workspace_path, 'etc/tempest.conf')]
for model_alias in zaza.model.get_juju_model_aliases().keys():
tempest_test_key = model_alias
if model_alias == zaza.charm_lifecycle.utils.DEFAULT_MODEL_ALIAS:
tempest_test_key = 'default'
config = charm_config['tests_options']['tempest'][tempest_test_key]
if config.get('smoke'):
smoke = config.get('smoke')
if smoke and smoke is True:
tempest_options.extend(['--smoke'])
if config.get('regex'):
tempest_options.extend(
@@ -74,5 +78,9 @@ class TempestTest():
try:
subprocess.check_call(tempest_options)
except subprocess.CalledProcessError:
return False
return True
result = False
break
keep_workspace = config.get('keep-workspace')
if not keep_workspace or keep_workspace is not True:
tempest_utils.destroy_workspace(workspace_name, workspace_path)
return result

View File

@@ -0,0 +1,67 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility code for working with tempest workspaces."""
import os
from pathlib import Path
import shutil
import subprocess
import zaza.model as model
def get_workspace():
    """Get tempest workspace name and path.

    The workspace is named after the current Juju model and is stored
    under ``~/.tempest``.

    :returns: A tuple containing tempest workspace name and workspace path
    :rtype: Tuple[str, str]
    """
    workspace_name = model.get_juju_model()
    workspace_path = str(Path.home() / '.tempest' / workspace_name)
    return (workspace_name, workspace_path)
def destroy_workspace(workspace_name, workspace_path):
    """Delete tempest workspace.

    Best-effort: first try to unregister the workspace via the tempest
    CLI (ignoring a missing binary or a failed call), then remove the
    on-disk workspace directory if it still exists.

    :param workspace_name: name of workspace
    :type workspace_name: str
    :param workspace_path: directory path where workspace is stored
    :type workspace_path: str
    :returns: None
    :rtype: None
    """
    removal_cmd = [
        'tempest', 'workspace', 'remove', '--rmdir',
        '--name', workspace_name,
    ]
    try:
        subprocess.check_call(removal_cmd)
    except (subprocess.CalledProcessError, FileNotFoundError):
        # tempest is not installed or the workspace was never registered.
        pass
    if os.path.isdir(workspace_path):
        shutil.rmtree(workspace_path)
def init_workspace(workspace_path):
    """Initialize tempest workspace.

    Best-effort: a failing ``tempest init`` (e.g. the workspace already
    exists) is deliberately ignored.

    :param workspace_path: directory path where workspace is stored
    :type workspace_path: str
    :returns: None
    :rtype: None
    """
    init_cmd = ['tempest', 'init', workspace_path]
    try:
        subprocess.check_call(init_cmd)
    except subprocess.CalledProcessError:
        pass

View File

@@ -100,8 +100,7 @@ class BaseCharmTest(unittest.TestCase):
run_resource_cleanup = False
@classmethod
def resource_cleanup(cls):
def resource_cleanup(self):
"""Cleanup any resources created during the test run.
Override this method with a method which removes any resources
@@ -111,12 +110,13 @@ class BaseCharmTest(unittest.TestCase):
"""
pass
@classmethod
def tearDown(cls):
# this must be a class instance method otherwise descentents will not be
# able to influence if cleanup should be run.
def tearDown(self):
"""Run teardown for test class."""
if cls.run_resource_cleanup:
if self.run_resource_cleanup:
logging.info('Running resource cleanup')
cls.resource_cleanup()
self.resource_cleanup()
@classmethod
def setUpClass(cls, application_name=None, model_alias=None):
@@ -440,6 +440,21 @@ class OpenStackBaseTest(BaseCharmTest):
cls.nova_client = (
openstack_utils.get_nova_session_client(cls.keystone_session))
def resource_cleanup(self):
"""Remove test resources."""
try:
logging.info('Removing instances launched by test ({}*)'
.format(self.RESOURCE_PREFIX))
for server in self.nova_client.servers.list():
if server.name.startswith(self.RESOURCE_PREFIX):
openstack_utils.delete_resource(
self.nova_client.servers,
server.id,
msg="server")
except AttributeError:
# Test did not define self.RESOURCE_PREFIX, ignore.
pass
def launch_guest(self, guest_name, userdata=None):
"""Launch two guests to use in tests.

View File

@@ -16,6 +16,7 @@
import base64
import functools
import logging
import requests
import tempfile
@@ -99,7 +100,7 @@ async def async_mojo_unseal_by_unit():
unit_name, './hooks/update-status')
def auto_initialize(cacert=None, validation_application='keystone'):
def auto_initialize(cacert=None, validation_application='keystone', wait=True):
"""Auto initialize vault for testing.
Generate a csr and uploading a signed certificate.
@@ -114,6 +115,7 @@ def auto_initialize(cacert=None, validation_application='keystone'):
:returns: None
:rtype: None
"""
logging.info('Running auto_initialize')
basic_setup(cacert=cacert, unseal_and_authorize=True)
action = vault_utils.run_get_csr()
@@ -131,10 +133,11 @@ def auto_initialize(cacert=None, validation_application='keystone'):
root_ca=cacertificate,
allowed_domains='openstack.local')
zaza.model.wait_for_agent_status()
test_config = lifecycle_utils.get_charm_config(fatal=False)
zaza.model.wait_for_application_states(
states=test_config.get('target_deploy_status', {}))
if wait:
zaza.model.wait_for_agent_status()
test_config = lifecycle_utils.get_charm_config(fatal=False)
zaza.model.wait_for_application_states(
states=test_config.get('target_deploy_status', {}))
if validation_application:
validate_ca(cacertificate, application=validation_application)
@@ -163,6 +166,12 @@ auto_initialize_no_validation = functools.partial(
validation_application=None)
auto_initialize_no_validation_no_wait = functools.partial(
auto_initialize,
validation_application=None,
wait=False)
def validate_ca(cacertificate, application="keystone", port=5000):
"""Validate Certificate Authority against application.

View File

@@ -51,6 +51,10 @@ class BaseVaultTest(test_utils.OpenStackBaseTest):
vault_utils.auth_all(cls.clients, cls.vault_creds['root_token'])
vault_utils.ensure_secret_backend(cls.clients[0])
def tearDown(self):
    """Run test cleanup for Vault tests.

    Unseals all Vault clients with the first stored unseal key so the
    deployment is left usable for subsequent tests.
    """
    vault_utils.unseal_all(self.clients, self.vault_creds['keys'][0])
@contextlib.contextmanager
def pause_resume(self, services, pgrep_full=False):
"""Override pause_resume for Vault behavior."""

View File

@@ -5,6 +5,11 @@ import logging
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.model as zaza_model
REPLICATED_POOL_TYPE = 'replicated'
ERASURE_POOL_TYPE = 'erasure-coded'
REPLICATED_POOL_CODE = 1
ERASURE_POOL_CODE = 3
def get_expected_pools(radosgw=False):
"""Get expected ceph pools.
@@ -97,6 +102,39 @@ def get_ceph_pools(unit_name, model_name=None):
return pools
def get_ceph_pool_details(query_leader=True, unit_name=None, model_name=None):
    """Get ceph pool details.

    Run ``ceph osd pool ls detail`` on the ceph-mon leader (or on a named
    unit) and return the parsed JSON output.

    :param query_leader: Whether to query the leader for pool details.
    :type query_leader: bool
    :param unit_name: Name of unit to get the pools on if query_leader is
                      False
    :type unit_name: string
    :param model_name: Name of model to operate in
    :type model_name: str
    :returns: List of dicts of ceph pool details
    :rtype: List[Dict]
    :raises: ValueError if both query_leader and unit_name are given
    :raises: zaza_model.CommandRunFailed if the ceph command fails
    """
    cmd = 'sudo ceph osd pool ls detail -f json'
    # The two targeting modes are mutually exclusive.
    if query_leader and unit_name:
        raise ValueError("Cannot set query_leader and unit_name")
    if query_leader:
        result = zaza_model.run_on_leader(
            'ceph-mon',
            cmd,
            model_name=model_name)
    else:
        result = zaza_model.run_on_unit(
            unit_name,
            cmd,
            model_name=model_name)
    # Non-zero exit code means the ceph command itself failed.
    if int(result.get('Code')) != 0:
        raise zaza_model.CommandRunFailed(cmd, result)
    return json.loads(result.get('Stdout'))
def get_ceph_df(unit_name, model_name=None):
"""Return dict of ceph df json output, including ceph pool state.

View File

@@ -1737,6 +1737,15 @@ def get_overcloud_auth(address=None, model_name=None):
}
if tls_rid:
unit = model.get_first_unit_name('keystone', model_name=model_name)
# ensure that the path to put the local cacert in actually exists. The
# assumption that 'tests/' exists for, say, mojo is false.
# Needed due to:
# commit: 537473ad3addeaa3d1e4e2d0fd556aeaa4018eb2
_dir = os.path.dirname(KEYSTONE_LOCAL_CACERT)
if not os.path.exists(_dir):
os.makedirs(_dir)
model.scp_from_unit(
unit,
KEYSTONE_REMOTE_CACERT,
@@ -2036,7 +2045,8 @@ def upload_image_to_glance(glance, local_path, image_name, disk_format='qcow2',
return image
def create_image(glance, image_url, image_name, image_cache_dir=None, tags=[]):
def create_image(glance, image_url, image_name, image_cache_dir=None, tags=[],
properties=None):
"""Download the image and upload it to glance.
Download an image from image_url and upload it to glance labelling
@@ -2053,6 +2063,8 @@ def create_image(glance, image_url, image_name, image_cache_dir=None, tags=[]):
:type image_cache_dir: Option[str, None]
:param tags: Tags to add to image
:type tags: list of str
:param properties: Properties and values to add to image
:type properties: dict
:returns: glance image pointer
:rtype: glanceclient.common.utils.RequestIdProxy
"""
@@ -2074,6 +2086,11 @@ def create_image(glance, image_url, image_name, image_cache_dir=None, tags=[]):
logging.debug(
'applying tag to image: glance.image_tags.update({}, {}) = {}'
.format(image.id, tags, result))
logging.info("Setting image properties: {}".format(properties))
if properties:
result = glance.images.update(image.id, **properties)
return image
@@ -2202,7 +2219,8 @@ def get_private_key_file(keypair_name):
:returns: Path to file containing key
:rtype: str
"""
return 'tests/id_rsa_{}'.format(keypair_name)
tmp_dir = deployment_env.get_tmpdir()
return '{}/id_rsa_{}'.format(tmp_dir, keypair_name)
def write_private_key(keypair_name, key):
@@ -2400,7 +2418,7 @@ def ssh_command(username,
ssh.connect(ip, username=username, password=password)
else:
key = paramiko.RSAKey.from_private_key(io.StringIO(privkey))
ssh.connect(ip, username=username, password='', pkey=key)
ssh.connect(ip, username=username, password=None, pkey=key)
logging.info("Running {} on {}".format(command, vm_name))
stdin, stdout, stderr = ssh.exec_command(command)
if verify and callable(verify):

View File

@@ -35,6 +35,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
])
@@ -57,6 +58,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2019.1', 'stein'),
('2019.2', 'train'),
('2020.1', 'ussuri'),
('2020.2', 'victoria'),
])
OPENSTACK_RELEASES_PAIRS = [
@@ -66,7 +68,8 @@ OPENSTACK_RELEASES_PAIRS = [
'xenial_pike', 'artful_pike', 'xenial_queens',
'bionic_queens', 'bionic_rocky', 'cosmic_rocky',
'bionic_stein', 'disco_stein', 'bionic_train',
'eoan_train', 'bionic_ussuri', 'focal_ussuri']
'eoan_train', 'bionic_ussuri', 'focal_ussuri',
'focal_victoria', 'groovy_victoria']
# The ugly duckling - must list releases oldest to newest
SWIFT_CODENAMES = OrderedDict([
@@ -105,7 +108,9 @@ SWIFT_CODENAMES = OrderedDict([
('train',
['2.22.0']),
('ussuri',
['2.24.0']),
['2.24.0', '2.25.0']),
('victoria',
['2.25.0']),
])
# >= Liberty version->codename mapping
@@ -121,6 +126,7 @@ PACKAGE_CODENAMES = {
('19', 'stein'),
('20', 'train'),
('21', 'ussuri'),
('22', 'victoria'),
]),
'neutron-common': OrderedDict([
('7', 'liberty'),
@@ -133,6 +139,7 @@ PACKAGE_CODENAMES = {
('14', 'stein'),
('15', 'train'),
('16', 'ussuri'),
('17', 'victoria'),
]),
'cinder-common': OrderedDict([
('7', 'liberty'),
@@ -145,6 +152,7 @@ PACKAGE_CODENAMES = {
('14', 'stein'),
('15', 'train'),
('16', 'ussuri'),
('17', 'victoria'),
]),
'keystone': OrderedDict([
('8', 'liberty'),
@@ -157,6 +165,7 @@ PACKAGE_CODENAMES = {
('15', 'stein'),
('16', 'train'),
('17', 'ussuri'),
('18', 'victoria'),
]),
'horizon-common': OrderedDict([
('8', 'liberty'),
@@ -168,7 +177,8 @@ PACKAGE_CODENAMES = {
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
('17', 'ussuri'),
('18', 'ussuri'),
('19', 'victoria'),
]),
'ceilometer-common': OrderedDict([
('5', 'liberty'),
@@ -181,6 +191,7 @@ PACKAGE_CODENAMES = {
('12', 'stein'),
('13', 'train'),
('14', 'ussuri'),
('15', 'victoria'),
]),
'heat-common': OrderedDict([
('5', 'liberty'),
@@ -193,6 +204,7 @@ PACKAGE_CODENAMES = {
('12', 'stein'),
('13', 'train'),
('14', 'ussuri'),
('15', 'victoria'),
]),
'glance-common': OrderedDict([
('11', 'liberty'),
@@ -205,6 +217,7 @@ PACKAGE_CODENAMES = {
('18', 'stein'),
('19', 'train'),
('20', 'ussuri'),
('21', 'victoria'),
]),
'openstack-dashboard': OrderedDict([
('8', 'liberty'),
@@ -216,7 +229,8 @@ PACKAGE_CODENAMES = {
('14', 'rocky'),
('15', 'stein'),
('16', 'train'),
('17', 'ussuri'),
('18', 'ussuri'),
('19', 'victoria'),
]),
'designate-common': OrderedDict([
('1', 'liberty'),
@@ -229,6 +243,7 @@ PACKAGE_CODENAMES = {
('8', 'stein'),
('9', 'train'),
('10', 'ussuri'),
('11', 'victoria'),
]),
'ovn-common': OrderedDict([
('2', 'train'),

View File

@@ -13,15 +13,17 @@
# limitations under the License.
"""Collection of functions to support upgrade testing."""
import re
import itertools
import logging
import collections
import re
import zaza.model
SERVICE_GROUPS = collections.OrderedDict([
('Stateful Services', ['percona-cluster', 'rabbitmq-server', 'ceph-mon',
'mysql-innodb-cluster']),
SERVICE_GROUPS = (
('Database Services', ['percona-cluster', 'mysql-innodb-cluster']),
('Stateful Services', ['rabbitmq-server', 'ceph-mon']),
('Core Identity', ['keystone']),
('Control Plane', [
'aodh', 'barbican', 'ceilometer', 'ceph-fs',
@@ -31,8 +33,7 @@ SERVICE_GROUPS = collections.OrderedDict([
'nova-cloud-controller', 'openstack-dashboard']),
('Data Plane', [
'nova-compute', 'ceph-osd',
'swift-proxy', 'swift-storage'])
])
'swift-proxy', 'swift-storage']))
UPGRADE_EXCLUDE_LIST = ['rabbitmq-server', 'percona-cluster']
@@ -106,6 +107,30 @@ def _apply_extra_filters(filters, extra_filters):
return filters
def _filter_easyrsa(app, app_config, model_name=None):
    """Filter out the easyrsa charm from upgrades (Bug #1850121).

    :param app: Application name
    :type app: str
    :param app_config: Application configuration, including the 'charm' URL
    :type app_config: dict
    :param model_name: Name of model to operate in
    :type model_name: str
    :returns: True when this application should be skipped
    :rtype: bool
    """
    charm = extract_charm_name_from_url(app_config['charm'])
    skip = "easyrsa" in charm
    if skip:
        logging.warn("Skipping upgrade of easyrsa Bug #1850121")
    return skip
def _filter_etcd(app, app_config, model_name=None):
    """Filter out the etcd charm from upgrades (Bug #1850124).

    :param app: Application name
    :type app: str
    :param app_config: Application configuration, including the 'charm' URL
    :type app_config: dict
    :param model_name: Name of model to operate in
    :type model_name: str
    :returns: True when this application should be skipped
    :rtype: bool
    """
    charm_name = extract_charm_name_from_url(app_config['charm'])
    if "etcd" in charm_name:
        # Fixed copy-paste bug: the message previously said "easyrsa"
        # even though this filter matches etcd (Bug #1850124 is etcd's).
        logging.warn("Skipping upgrade of etcd Bug #1850124")
        return True
    return False
def _filter_memcached(app, app_config, model_name=None):
    """Filter out the memcached charm from upgrades.

    :param app: Application name
    :type app: str
    :param app_config: Application configuration, including the 'charm' URL
    :type app_config: dict
    :param model_name: Name of model to operate in
    :type model_name: str
    :returns: True when this application should be skipped
    :rtype: bool
    """
    charm = extract_charm_name_from_url(app_config['charm'])
    if "memcached" not in charm:
        return False
    logging.warn("Skipping upgrade of memcached charm")
    return True
def get_upgrade_groups(model_name=None, extra_filters=None):
"""Place apps in the model into their upgrade groups.
@@ -170,21 +195,21 @@ def get_charm_upgrade_groups(model_name=None, extra_filters=None):
def _build_service_groups(applications):
groups = collections.OrderedDict()
for phase_name, charms in SERVICE_GROUPS.items():
groups = []
for phase_name, charms in SERVICE_GROUPS:
group = []
for app, app_config in applications.items():
charm_name = extract_charm_name_from_url(app_config['charm'])
if charm_name in charms:
group.append(app)
groups[phase_name] = group
groups.append((phase_name, group))
sweep_up = []
for app in applications:
if not (app in [a for group in groups.values() for a in group]):
sweep_up.append(app)
groups['sweep_up'] = sweep_up
for name, group in groups.items():
# collect all the values into a list, and then a lookup hash
values = list(itertools.chain(*(ls for _, ls in groups)))
vhash = {v: 1 for v in values}
sweep_up = [app for app in applications if app not in vhash]
groups.append(('sweep_up', sweep_up))
for name, group in groups:
group.sort()
return groups