Merge branch 'master' into hacluster-scaleback

Aurelien Lourot
2020-09-08 12:40:12 +02:00
31 changed files with 673 additions and 283 deletions
+1 -1
@@ -1,7 +1,7 @@
aiounittest
async_generator
boto3
juju
juju!=2.8.3 # blacklist 2.8.3 as it appears to have a connection bug
juju_wait
PyYAML<=4.2,>=3.0
flake8>=2.2.4
-12
@@ -30,15 +30,3 @@ class TestOpenStackBaseTest(unittest.TestCase):
MyTestClass.setUpClass('foo', 'bar')
_setUpClass.assert_called_with('foo', 'bar')
class TestUtils(unittest.TestCase):
def test_format_addr(self):
self.assertEquals('1.2.3.4', test_utils.format_addr('1.2.3.4'))
self.assertEquals(
'[2001:db8::42]', test_utils.format_addr('2001:db8::42'))
with self.assertRaises(ValueError):
test_utils.format_addr('999.999.999.999')
with self.assertRaises(ValueError):
test_utils.format_addr('2001:db8::g')
@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import mock
import pprint
@@ -89,12 +88,13 @@ class TestUpgradeUtils(ut_utils.BaseTestCase):
expected)
def test_get_upgrade_groups(self):
expected = collections.OrderedDict([
expected = [
('Database Services', []),
('Stateful Services', []),
('Core Identity', []),
('Control Plane', ['cinder']),
('Data Plane', ['nova-compute']),
('sweep_up', [])])
('sweep_up', [])]
actual = openstack_upgrade.get_upgrade_groups()
pprint.pprint(expected)
pprint.pprint(actual)
@@ -103,12 +103,13 @@ class TestUpgradeUtils(ut_utils.BaseTestCase):
expected)
def test_get_series_upgrade_groups(self):
expected = collections.OrderedDict([
('Stateful Services', ['mydb']),
expected = [
('Database Services', ['mydb']),
('Stateful Services', []),
('Core Identity', []),
('Control Plane', ['cinder']),
('Data Plane', ['nova-compute']),
('sweep_up', ['ntp'])])
('sweep_up', ['ntp'])]
actual = openstack_upgrade.get_series_upgrade_groups()
pprint.pprint(expected)
pprint.pprint(actual)
@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for benchmarking ceph."""
@@ -0,0 +1,124 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ceph Benchmark Tests."""
import logging
import re
import unittest
import zaza.model
class BenchmarkTests(unittest.TestCase):
"""Ceph Bencharmk Tests."""
@classmethod
def setUpClass(cls):
"""Run class setup for running ceph benchmark tests."""
super().setUpClass()
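# Summary lines in rados bench output (e.g. "Total time run:",
# "Bandwidth (MB/sec):") start with a capital letter; this regex picks
# them out of the per-second progress rows.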
cls.results_match = "^[A-Z].*"
cls.pool = "zaza_benchmarks"
cls.test_results = {}
cls.time_in_secs = 30
def parse_bench_results(self, results_string):
"""Parse bench results from string.
:param results_string: Output from the rados bench command,
with newlines due to juju run's output.
:type results_string: string
:returns: Dictionary of results summary
:rtype: dict
"""
_results = {}
_lines = results_string.split("\n")
for _line in _lines:
_line = _line.strip()
if re.match(self.results_match, _line):
_keyvalues = _line.split(":")
try:
_results[_keyvalues[0].strip()] = _keyvalues[1].strip()
except IndexError:
# Skipping detailed output for summary details
pass
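# _results now maps summary labels to values, illustratively e.g.
# {'Total time run': '30.1', 'Bandwidth (MB/sec)': '99.7'}.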
return _results
def run_rados_bench(self, action, params=None):
"""Run rados bench.
:param action: rados bench action, i.e. write, rand or seq
:type action: string
:param params: List of extra string parameters for the rados bench command
:type params: List[str]
:returns: Unit run dict result
:rtype: dict
"""
_cmd = "rados bench -p {} {} {}".format(
self.pool, self.time_in_secs, action)
if params:
_cmd += " "
_cmd += " ".join(params)
logging.info(
"Running '{}' for {} seconds ...".format(_cmd, self.time_in_secs))
_result = zaza.model.run_on_leader(
"ceph-mon", _cmd, timeout=self.time_in_secs + 60)
return _result
def test_001_create_pool(self):
"""Create ceph pool."""
_cmd = "ceph osd pool create {} 100 100".format(self.pool)
_result = zaza.model.run_on_leader(
"ceph-mon", _cmd)
if _result.get("Code") and not _result.get("Code").startswith('0'):
if "already exists" in _result.get("Stderr", ""):
logging.warning(
"Ceph osd pool {} already exits.".format(self.pool))
else:
logging.error("Ceph osd pool create failed")
raise Exception(_result.get("Stderr", ""))
def test_100_rados_bench_write(self):
"""Rados bench write test."""
_result = self.run_rados_bench("write", params=["--no-cleanup"])
self.test_results["write"] = (
self.parse_bench_results(_result.get("Stdout", "")))
def test_200_rados_bench_read_seq(self):
"""Rados bench read sequential test."""
_result = self.run_rados_bench("seq")
self.test_results["read_seq"] = (
self.parse_bench_results(_result.get("Stdout", "")))
def test_300_rados_bench_read_rand(self):
"""Rados bench read random test."""
_result = self.run_rados_bench("rand")
self.test_results["read_rand"] = (
self.parse_bench_results(_result.get("Stdout", "")))
def test_998_rados_cleanup(self):
"""Cleanup rados bench data."""
_cmd = "rados -p {} cleanup".format(self.pool)
_result = zaza.model.run_on_leader("ceph-mon", _cmd)
if _result.get("Code") and not _result.get("Code").startswith('0'):
logging.warning("rados cleanup failed")
def test_999_print_rados_bench_results(self):
"""Print rados bench results."""
print("######## Begin Ceph Results ########")
for test, results in self.test_results.items():
print("##### {} ######".format(test))
for key, value in results.items():
print("{}: {}".format(key, value))
print("######## End Ceph Results ########")
+73 -6
@@ -766,16 +766,23 @@ class CephProxyTest(unittest.TestCase):
msg = 'cinder-ceph pool was not found upon querying ceph-mon/0'
raise zaza_exceptions.CephPoolNotFound(msg)
expected = "pool=cinder-ceph, allow class-read " \
"object_prefix rbd_children"
# Checking for cinder-ceph specific permissions makes
# the test more robust when we add additional relations
# to ceph for other applications (such as glance and nova).
expected_permissions = [
"allow rwx pool=cinder-ceph",
"allow class-read object_prefix rbd_children",
]
cmd = "sudo ceph auth get client.cinder-ceph"
result = zaza_model.run_on_unit('ceph-mon/0', cmd)
output = result.get('Stdout').strip()
if expected not in output:
msg = ('cinder-ceph pool restriction was not configured correctly.'
' Found: {}'.format(output))
raise zaza_exceptions.CephPoolNotConfigured(msg)
for expected in expected_permissions:
if expected not in output:
msg = ('cinder-ceph pool restriction ({}) was not'
' configured correctly.'
' Found: {}'.format(expected, output))
raise zaza_exceptions.CephPoolNotConfigured(msg)
class CephPrometheusTest(unittest.TestCase):
@@ -794,6 +801,66 @@ class CephPrometheusTest(unittest.TestCase):
'3', _get_mon_count_from_prometheus(unit.public_address))
class CephPoolConfig(Exception):
"""Custom Exception for bad Ceph pool config."""
pass
class CheckPoolTypes(unittest.TestCase):
"""Test the ceph pools created for clients are of the expected type."""
def test_check_pool_types(self):
"""Check type of pools created for clients."""
app_pools = [
('glance', 'glance'),
('nova-compute', 'nova'),
('cinder-ceph', 'cinder-ceph')]
runtime_pool_details = zaza_ceph.get_ceph_pool_details()
for app, pool_name in app_pools:
try:
app_config = zaza_model.get_application_config(app)
except KeyError:
logging.info(
'Skipping pool check of %s, application %s not present',
pool_name,
app)
continue
rel_id = zaza_model.get_relation_id(
app,
'ceph-mon',
remote_interface_name='client')
if not rel_id:
logging.info(
'Skipping pool check of %s, ceph relation not present',
app)
continue
juju_pool_config = app_config.get('pool-type')
if juju_pool_config:
expected_pool_type = juju_pool_config['value']
else:
# If the pool-type option is absent assume the default of
# replicated.
expected_pool_type = zaza_ceph.REPLICATED_POOL_TYPE
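# for/else: the else branch runs only when no pool in the runtime
# details matched pool_name, i.e. the expected pool is missing.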
for pool_config in runtime_pool_details:
if pool_config['pool_name'] == pool_name:
logging.info('Checking {} is {}'.format(
pool_name,
expected_pool_type))
expected_pool_code = -1
if expected_pool_type == zaza_ceph.REPLICATED_POOL_TYPE:
expected_pool_code = zaza_ceph.REPLICATED_POOL_CODE
elif expected_pool_type == zaza_ceph.ERASURE_POOL_TYPE:
expected_pool_code = zaza_ceph.ERASURE_POOL_CODE
self.assertEqual(
pool_config['type'],
expected_pool_code)
break
else:
raise CephPoolConfig(
"Failed to find config for {}".format(pool_name))
# NOTE: We might query before prometheus has fetch data
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1,
min=5, max=10),
@@ -53,8 +53,11 @@ class FullCloudCharmUpgradeTest(unittest.TestCase):
"""Run charm upgrade."""
self.lts.test_launch_small_instance()
applications = zaza.model.get_status().applications
groups = upgrade_utils.get_charm_upgrade_groups()
for group_name, group in groups.items():
groups = upgrade_utils.get_charm_upgrade_groups(
extra_filters=[upgrade_utils._filter_etcd,
upgrade_utils._filter_easyrsa,
upgrade_utils._filter_memcached])
for group_name, group in groups:
logging.info("About to upgrade {} ({})".format(group_name, group))
for application, app_details in applications.items():
if application not in group:
@@ -97,7 +97,7 @@ class CinderBackupTest(test_utils.OpenStackBaseTest):
self.cinder_client.volumes,
cinder_vol.id,
wait_iteration_max_time=180,
stop_after_attempt=15,
stop_after_attempt=30,
expected_status='available',
msg='Volume status wait')
@@ -109,7 +109,7 @@ class CinderBackupTest(test_utils.OpenStackBaseTest):
self.cinder_client.backups,
vol_backup.id,
wait_iteration_max_time=180,
stop_after_attempt=15,
stop_after_attempt=30,
expected_status='available',
msg='Volume status wait')
# Delete the volume
+33 -1
@@ -16,12 +16,15 @@
"""Encapsulate Gnocchi testing."""
import base64
import boto3
import logging
import pprint
from gnocchiclient.v1 import client as gnocchi_client
import zaza.model as model
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities as utilities
import zaza.openstack.utilities.openstack as openstack_utils
@@ -73,7 +76,7 @@ class GnocchiS3Test(test_utils.OpenStackBaseTest):
session = openstack_utils.get_overcloud_keystone_session()
ks_client = openstack_utils.get_keystone_session_client(session)
# Get token data so we can clean our user_id and project_id
# Get token data so we can glean our user_id and project_id
token_data = ks_client.tokens.get_token_data(session.get_token())
project_id = token_data['token']['project']['id']
user_id = token_data['token']['user']['id']
@@ -110,3 +113,32 @@ class GnocchiS3Test(test_utils.OpenStackBaseTest):
break
else:
raise AssertionError('Bucket "{}" not found'.format(gnocchi_bkt))
class GnocchiExternalCATest(test_utils.OpenStackBaseTest):
"""Test Gnocchi for external root CA config option."""
def test_upload_external_cert(self):
"""Verify that the external CA is uploaded correctly."""
logging.info('Changing value for trusted-external-ca-cert.')
ca_cert_option = 'trusted-external-ca-cert'
ppk, cert = utilities.cert.generate_cert('gnocchi_test.ci.local')
b64_cert = base64.b64encode(cert).decode()
config = {
ca_cert_option: b64_cert,
}
model.set_application_config(
'gnocchi',
config
)
model.block_until_all_units_idle()
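# Both the dropped .crt under /usr/local/share/ca-certificates and the
# .pem that update-ca-certificates derives from it under /etc/ssl/certs
# should appear (assuming the charm runs update-ca-certificates).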
files = [
'/usr/local/share/ca-certificates/gnocchi-external.crt',
'/etc/ssl/certs/gnocchi-external.pem',
]
for file in files:
logging.info("Validating that {} is created.".format(file))
model.block_until_file_has_contents('gnocchi', file, 'CERTIFICATE')
logging.info("Found {} successfully.".format(file))
+32 -30
@@ -79,58 +79,60 @@ class HaclusterTest(HaclusterBaseTest):
class HaclusterScalebackTest(HaclusterBaseTest):
"""hacluster scaleback tests."""
_PRINCIPLE_APP_NAME = 'keystone'
_HACLUSTER_APP_NAME = 'hacluster'
_HACLUSTER_CHARM_NAME = 'hacluster'
@classmethod
def setUpClass(cls):
"""Run class setup for running hacluster scaleback tests."""
super(HaclusterScalebackTest, cls).setUpClass()
test_config = cls.test_config['tests_options']['hacluster']
cls._principle_app_name = test_config['principle-app-name']
cls._hacluster_app_name = test_config['hacluster-app-name']
cls._hacluster_charm_name = test_config['hacluster-charm-name']
def test_930_scaleback(self):
"""Remove a unit, recalculate quorum and add a new one."""
principle_units = zaza.model.get_status().applications[
self._PRINCIPLE_APP_NAME]['units']
principle_units = sorted(zaza.model.get_status().applications[
self._principle_app_name]['units'].keys())
self.assertEqual(len(principle_units), 3)
doomed_principle = sorted(principle_units.keys())[0]
doomed_unit = juju_utils.get_subordinate_units(
[doomed_principle], charm_name=self._HACLUSTER_CHARM_NAME)[0]
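# Units are sorted above so the doomed and surviving units are chosen
# deterministically across runs.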
doomed_principle_unit = principle_units[0]
other_principle_unit = principle_units[1]
doomed_hacluster_unit = juju_utils.get_subordinate_units(
[doomed_principle_unit], charm_name=self._hacluster_charm_name)[0]
other_hacluster_unit = juju_utils.get_subordinate_units(
[other_principle_unit], charm_name=self._hacluster_charm_name)[0]
logging.info('Pausing unit {}'.format(doomed_unit))
logging.info('Pausing unit {}'.format(doomed_hacluster_unit))
zaza.model.run_action(
doomed_unit,
doomed_hacluster_unit,
'pause',
raise_on_failure=True)
logging.info('OK')
logging.info('Removing {}'.format(doomed_principle))
logging.info('Removing {}'.format(doomed_principle_unit))
zaza.model.destroy_unit(
self._PRINCIPLE_APP_NAME,
doomed_principle,
self._principle_app_name,
doomed_principle_unit,
wait_disappear=True)
logging.info('OK')
logging.info('Waiting for model to settle')
zaza.model.block_until_unit_wl_status(other_hacluster_unit, 'blocked')
zaza.model.block_until_unit_wl_status(other_principle_unit, 'blocked')
zaza.model.block_until_all_units_idle()
logging.info('OK')
logging.info('Updating corosync ring')
zaza.model.run_action_on_leader(
self._HACLUSTER_APP_NAME,
self._hacluster_app_name,
'update-ring',
action_params={'i-really-mean-it': True},
raise_on_failure=True)
expected_states = {
self._HACLUSTER_APP_NAME: {
"workload-status": "blocked",
"workload-status-message":
"Insufficient peer units for ha cluster (require 3)"
},
self._PRINCIPLE_APP_NAME: {
"workload-status": "blocked",
"workload-status-message": "Database not initialised",
},
}
zaza.model.wait_for_application_states(states=expected_states)
zaza.model.block_until_all_units_idle()
logging.info('Adding an hacluster unit')
zaza.model.add_unit(self._principle_app_name, wait_appear=True)
logging.info('OK')
logging.info('Adding a hacluster unit')
zaza.model.add_unit(self._PRINCIPLE_APP_NAME, wait_appear=True)
expected_states = {self._HACLUSTER_APP_NAME: {
logging.info('Waiting for model to settle')
expected_states = {self._hacluster_app_name: {
"workload-status": "active",
"workload-status-message": "Unit is ready and clustered"}}
zaza.model.wait_for_application_states(states=expected_states)
@@ -29,6 +29,9 @@ import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.utilities.generic as generic_utils
PXC_SEEDED_FILE = "/var/lib/percona-xtradb-cluster/seeded"
class MySQLBaseTest(test_utils.OpenStackBaseTest):
"""Base for mysql charm tests."""
@@ -265,6 +268,10 @@ class PerconaClusterCharmTests(MySQLCommonTests, PerconaClusterBaseTest):
" (wanted=%s, cluster_size=%s)" % (self.units, cluster_size))
assert cluster_size >= self.units, msg
logging.info("Ensuring PXC seeded file is present")
zaza.model.block_until_file_has_contents(self.application,
PXC_SEEDED_FILE, "done")
def test_130_change_root_password(self):
"""Change root password.
+44 -32
@@ -23,7 +23,6 @@
import copy
import logging
import tenacity
import unittest
import zaza
import zaza.openstack.charm_tests.nova.utils as nova_utils
@@ -35,6 +34,7 @@ import zaza.openstack.utilities.openstack as openstack_utils
class NeutronPluginApiSharedTests(test_utils.OpenStackBaseTest):
"""Shared tests for Neutron Plugin API Charms."""
@classmethod
def setUpClass(cls):
"""Run class setup for running Neutron Openvswitch tests."""
super(NeutronPluginApiSharedTests, cls).setUpClass()
@@ -109,7 +109,7 @@ class NeutronGatewayTest(NeutronPluginApiSharedTests):
@classmethod
def setUpClass(cls):
"""Run class setup for running Neutron Gateway tests."""
super(NeutronGatewayTest, cls).setUpClass(cls)
super(NeutronGatewayTest, cls).setUpClass()
cls.services = cls._get_services()
# set up clients
@@ -148,33 +148,6 @@ class NeutronGatewayTest(NeutronPluginApiSharedTests):
self.assertIn('qos', ovs_agent['configurations']['extensions'])
@unittest.expectedFailure
def test_800_ovs_bridges_are_managed_by_us(self):
"""Checking OVS bridges' external-id.
OVS bridges created by us should be marked as managed by us in their
external-id. See
http://docs.openvswitch.org/en/latest/topics/integration/
NOTE(lourot): this test is expected to fail as long as this feature
hasn't landed yet: https://review.opendev.org/717074
"""
for unit in zaza.model.get_units(self._APP_NAME,
model_name=self.model_name):
for bridge_name in ('br-int', 'br-ex'):
logging.info(
'Checking that the bridge {}:{}'.format(
unit.name, bridge_name
) + ' is marked as managed by us'
)
expected_external_id = 'charm-neutron-gateway=managed'
actual_external_id = zaza.model.run_on_unit(
unit.entity_id,
'ovs-vsctl br-get-external-id {}'.format(bridge_name),
model_name=self.model_name
)['Stdout'].strip()
self.assertEqual(actual_external_id, expected_external_id)
def test_900_restart_on_config_change(self):
"""Checking restart happens on config change.
@@ -295,19 +268,31 @@ class NeutronCreateNetworkTest(test_utils.OpenStackBaseTest):
def test_400_create_network(self):
"""Create a network, verify that it exists, and then delete it."""
self._wait_for_neutron_ready()
self._assert_test_network_doesnt_exist()
self._create_test_network()
net_id = self._assert_test_network_exists_and_return_id()
self._delete_test_network(net_id)
self._assert_test_network_doesnt_exist()
@classmethod
def _wait_for_neutron_ready(cls):
logging.info('Waiting for Neutron to become ready...')
zaza.model.wait_for_application_states()
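# Poll the Neutron API: up to 12 attempts, 5 seconds apart (~1 minute).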
for attempt in tenacity.Retrying(
wait=tenacity.wait_fixed(5), # seconds
stop=tenacity.stop_after_attempt(12),
reraise=True):
with attempt:
cls.neutron_client.list_networks()
def _create_test_network(self):
logging.debug('Creating neutron network...')
logging.info('Creating neutron network...')
network = {'name': self._TEST_NET_NAME}
self.neutron_client.create_network({'network': network})
def _delete_test_network(self, net_id):
logging.debug('Deleting neutron network...')
logging.info('Deleting neutron network...')
self.neutron_client.delete_network(net_id)
def _assert_test_network_exists_and_return_id(self):
@@ -442,7 +427,7 @@ class NeutronOpenvSwitchTest(NeutronPluginApiSharedTests):
@classmethod
def setUpClass(cls):
"""Run class setup for running Neutron Openvswitch tests."""
super(NeutronOpenvSwitchTest, cls).setUpClass(cls)
super(NeutronOpenvSwitchTest, cls).setUpClass()
# set up client
cls.neutron_client = (
@@ -605,6 +590,33 @@ class NeutronOpenvSwitchTest(NeutronPluginApiSharedTests):
logging.info('Testing pause resume')
class NeutronOvsVsctlTest(NeutronPluginApiSharedTests):
"""Test 'ovs-vsctl'-related functionality on Neutron charms."""
def test_800_ovs_bridges_are_managed_by_us(self):
"""Checking OVS bridges' external-id.
OVS bridges created by us should be marked as managed by us in their
external-id. See
http://docs.openvswitch.org/en/latest/topics/integration/
"""
for unit in zaza.model.get_units(self.application_name,
model_name=self.model_name):
for bridge_name in ('br-int', 'br-ex'):
logging.info(
'Checking that the bridge {}:{}'.format(
unit.name, bridge_name
) + ' is marked as managed by us'
)
expected_external_id = 'charm-neutron-gateway=managed'
actual_external_id = zaza.model.run_on_unit(
unit.entity_id,
'ovs-vsctl br-get-external-id {}'.format(bridge_name),
model_name=self.model_name
)['Stdout'].strip()
self.assertEqual(actual_external_id, expected_external_id)
class NeutronNetworkingBase(test_utils.OpenStackBaseTest):
"""Base for checking openstack instances have valid networking."""
@@ -29,25 +29,40 @@ class NeutronCreateAristaNetworkTest(neutron_tests.NeutronCreateNetworkTest):
def setUpClass(cls):
"""Run class setup for running Neutron Arista tests."""
super(NeutronCreateAristaNetworkTest, cls).setUpClass()
cls._wait_for_neutron_ready()
logging.info('Waiting for Neutron to become ready...')
def _assert_test_network_exists_and_return_id(self):
logging.info('Checking that the test network exists on the Arista '
'test fixture...')
# Sometimes the API call from Neutron to Arista fails and Neutron
# retries a couple of seconds later, which is why the newly created
# test network may not be immediately visible on Arista's API.
# NOTE(lourot): I experienced a run where it took 53 seconds.
for attempt in tenacity.Retrying(
wait=tenacity.wait_fixed(5), # seconds
wait=tenacity.wait_fixed(10), # seconds
stop=tenacity.stop_after_attempt(12),
reraise=True):
with attempt:
cls.neutron_client.list_networks()
actual_network_names = arista_utils.query_fixture_networks(
arista_utils.fixture_ip_addr())
self.assertEqual(actual_network_names, [self._TEST_NET_NAME])
def _assert_test_network_exists_and_return_id(self):
actual_network_names = arista_utils.query_fixture_networks(
arista_utils.fixture_ip_addr())
self.assertEqual(actual_network_names, [self._TEST_NET_NAME])
return super(NeutronCreateAristaNetworkTest,
self)._assert_test_network_exists_and_return_id()
def _assert_test_network_doesnt_exist(self):
actual_network_names = arista_utils.query_fixture_networks(
arista_utils.fixture_ip_addr())
self.assertEqual(actual_network_names, [])
logging.info("Checking that the test network doesn't exist on the "
"Arista test fixture...")
for attempt in tenacity.Retrying(
wait=tenacity.wait_fixed(10), # seconds
stop=tenacity.stop_after_attempt(12),
reraise=True):
with attempt:
actual_network_names = arista_utils.query_fixture_networks(
arista_utils.fixture_ip_addr())
self.assertEqual(actual_network_names, [])
super(NeutronCreateAristaNetworkTest,
self)._assert_test_network_doesnt_exist()
+1 -1
@@ -92,7 +92,7 @@ def configure_octavia():
del test_config['target_deploy_status']['octavia']
_singleton = zaza.openstack.charm_tests.test_utils.OpenStackBaseTest()
_singleton.setUpClass()
_singleton.setUpClass(application_name='octavia')
with _singleton.config_change(cert_config, cert_config):
# wait for configuration to be applied then return
pass
+3 -84
@@ -659,7 +659,7 @@ class HeatTests(BasePolicydSpecialization):
class OctaviaTests(BasePolicydSpecialization):
"""Test the policyd override using the octavia client."""
_rule = {'rule.yaml': "{'os_load-balancer_api:loadbalancer:get_one': '!'}"}
_rule = {'rule.yaml': "{'os_load-balancer_api:provider:get_all': '!'}"}
@classmethod
def setUpClass(cls, application_name=None):
@@ -667,89 +667,8 @@ class OctaviaTests(BasePolicydSpecialization):
super(OctaviaTests, cls).setUpClass(application_name="octavia")
cls.application_name = "octavia"
def setup_for_attempt_operation(self, ip):
"""Create a loadbalancer.
This is necessary so that the attempt is to show the load-balancer,
which is an operation that the policy can stop. Octavia does have a
policy for just listing load-balancers, but unfortunately it doesn't
work, whereas showing the load-balancer can be stopped.
NB this only works if the setup phase of the octavia tests have been
completed.
:param ip: the ip of for keystone.
:type ip: str
"""
logging.info("Setting up loadbalancer.")
auth = openstack_utils.get_overcloud_auth(address=ip)
sess = openstack_utils.get_keystone_session(auth)
octavia_client = openstack_utils.get_octavia_session_client(sess)
neutron_client = openstack_utils.get_neutron_session_client(sess)
if openstack_utils.dvr_enabled():
network_name = 'private_lb_fip_network'
else:
network_name = 'private'
resp = neutron_client.list_networks(name=network_name)
vip_subnet_id = resp['networks'][0]['subnets'][0]
res = octavia_client.load_balancer_create(
json={
'loadbalancer': {
'description': 'Created by Zaza',
'admin_state_up': True,
'vip_subnet_id': vip_subnet_id,
'name': 'zaza-lb-0',
}})
self.lb_id = res['loadbalancer']['id']
# now wait for it to get to the active state
@tenacity.retry(wait=tenacity.wait_fixed(1),
reraise=True, stop=tenacity.stop_after_delay(900))
def wait_for_lb_resource(client, resource_id):
resp = client.load_balancer_show(resource_id)
logging.info(resp['provisioning_status'])
assert resp['provisioning_status'] == 'ACTIVE', (
'load balancer resource has not reached '
'expected provisioning status: {}'
.format(resp))
return resp
logging.info('Awaiting loadbalancer to reach provisioning_status '
'"ACTIVE"')
resp = wait_for_lb_resource(octavia_client, self.lb_id)
logging.info(resp)
logging.info("Setup loadbalancer complete.")
def cleanup_for_attempt_operation(self, ip):
"""Remove the loadbalancer.
:param ip: the IP address for keystone.
:type ip: str
"""
logging.info("Deleting loadbalancer {}.".format(self.lb_id))
auth = openstack_utils.get_overcloud_auth(address=ip)
sess = openstack_utils.get_keystone_session(auth)
octavia_client = openstack_utils.get_octavia_session_client(sess)
octavia_client.load_balancer_delete(self.lb_id)
logging.info("Deleting loadbalancer in progress ...")
@tenacity.retry(wait=tenacity.wait_fixed(1),
reraise=True, stop=tenacity.stop_after_delay(900))
def wait_til_deleted(client, lb_id):
lb_list = client.load_balancer_list()
ids = [lb['id'] for lb in lb_list['loadbalancers']]
assert lb_id not in ids, 'load balancer still deleting'
wait_til_deleted(octavia_client, self.lb_id)
logging.info("Deleted loadbalancer.")
def get_client_and_attempt_operation(self, ip):
"""Attempt to show the loadbalancer as a policyd override.
"""Attempt to list available provider drivers.
This operation should pass normally, and fail when
the rule has been overridden (see the `_rule` class variable).
@@ -761,6 +680,6 @@ class OctaviaTests(BasePolicydSpecialization):
octavia_client = openstack_utils.get_octavia_session_client(
self.get_keystone_session_admin_user(ip))
try:
octavia_client.load_balancer_show(self.lb_id)
octavia_client.provider_list()
except octaviaclient.OctaviaClientException:
raise PolicydOperationFailedException()
@@ -21,6 +21,7 @@ import requests
import zaza.model
from zaza.openstack.charm_tests.keystone import BaseKeystoneTest
import zaza.charm_lifecycle.utils as lifecycle_utils
import zaza.openstack.utilities.openstack as openstack_utils
class FailedToReachIDP(Exception):
@@ -42,6 +43,8 @@ class CharmKeystoneSAMLMellonTest(BaseKeystoneTest):
cls.test_config = lifecycle_utils.get_charm_config()
cls.application_name = cls.test_config['charm_name']
cls.action = "get-sp-metadata"
cls.current_release = openstack_utils.get_os_release()
cls.FOCAL_USSURI = openstack_utils.get_os_release("focal_ussuri")
def test_run_get_sp_metadata_action(self):
"""Validate the get-sp-metadata action."""
@@ -92,8 +95,13 @@ class CharmKeystoneSAMLMellonTest(BaseKeystoneTest):
else:
proto = "http"
# Use Keystone URL for < Focal
if self.current_release < self.FOCAL_USSURI:
region = "{}://{}:5000/v3".format(proto, keystone_ip)
else:
region = "default"
url = "{}://{}/horizon/auth/login/".format(proto, horizon_ip)
region = "{}://{}:5000/v3".format(proto, keystone_ip)
horizon_expect = ('<option value="samltest_mapped">'
'samltest.id</option>')
@@ -73,11 +73,14 @@ class ParallelSeriesUpgradeTest(unittest.TestCase):
workaround_script = None
files = []
applications = model.get_status().applications
for group_name, apps in upgrade_groups.items():
for group_name, apps in upgrade_groups:
logging.info("About to upgrade {} from {} to {}".format(
group_name, from_series, to_series))
upgrade_functions = []
if group_name in ["Stateful Services", "Data Plane", "sweep_up"]:
if group_name in ["Database Services",
"Stateful Services",
"Data Plane",
"sweep_up"]:
logging.info("Going to upgrade {} unit by unit".format(apps))
upgrade_function = \
parallel_series_upgrade.serial_series_upgrade
@@ -30,22 +30,6 @@ from zaza.openstack.utilities import (
from zaza.openstack.charm_tests.nova.tests import LTSGuestCreateTest
def _filter_easyrsa(app, app_config, model_name=None):
charm_name = upgrade_utils.extract_charm_name_from_url(app_config['charm'])
if "easyrsa" in charm_name:
logging.warn("Skipping series upgrade of easyrsa Bug #1850121")
return True
return False
def _filter_etcd(app, app_config, model_name=None):
charm_name = upgrade_utils.extract_charm_name_from_url(app_config['charm'])
if "etcd" in charm_name:
logging.warn("Skipping series upgrade of easyrsa Bug #1850124")
return True
return False
class SeriesUpgradeTest(unittest.TestCase):
"""Class to encapsulate Series Upgrade Tests."""
@@ -75,7 +59,7 @@ class SeriesUpgradeTest(unittest.TestCase):
continue
if "etcd" in app_details["charm"]:
logging.warn(
"Skipping series upgrade of easyrsa Bug #1850124")
"Skipping series upgrade of etcd Bug #1850124")
continue
charm_name = upgrade_utils.extract_charm_name_from_url(
app_details['charm'])
@@ -97,7 +81,7 @@ class SeriesUpgradeTest(unittest.TestCase):
logging.info(
"Running complete-cluster-series-upgrade action on leader")
model.run_action_on_leader(
'rabbitmq-server',
charm_name,
'complete-cluster-series-upgrade',
action_params={})
model.block_until_all_units_idle()
@@ -106,7 +90,7 @@ class SeriesUpgradeTest(unittest.TestCase):
logging.info(
"Running complete-cluster-series-upgrade action on leader")
model.run_action_on_leader(
'mysql',
charm_name,
'complete-cluster-series-upgrade',
action_params={})
model.block_until_all_units_idle()
@@ -208,10 +192,11 @@ class ParallelSeriesUpgradeTest(unittest.TestCase):
# Set Feature Flag
os.environ["JUJU_DEV_FEATURE_FLAGS"] = "upgrade-series"
upgrade_groups = upgrade_utils.get_series_upgrade_groups(
extra_filters=[_filter_etcd, _filter_easyrsa])
extra_filters=[upgrade_utils._filter_etcd,
upgrade_utils._filter_easyrsa])
applications = model.get_status().applications
completed_machines = []
for group_name, group in upgrade_groups.items():
for group_name, group in upgrade_groups:
logging.warn("About to upgrade {} ({})".format(group_name, group))
upgrade_group = []
for application, app_details in applications.items():
+12 -3
@@ -104,16 +104,25 @@ class SwiftStorageTests(test_utils.OpenStackBaseTest):
services = ['swift-account-server',
'swift-account-auditor',
'swift-account-reaper',
'swift-account-replicator',
'swift-container-server',
'swift-container-auditor',
'swift-container-replicator',
'swift-container-updater',
'swift-object-server',
'swift-object-auditor',
'swift-object-replicator',
'swift-object-updater',
'swift-container-sync']
current_os_release = openstack_utils.get_os_release()
focal_victoria = openstack_utils.get_os_release('focal_victoria')
if current_os_release < focal_victoria:
services += ['swift-account-replicator',
'swift-container-replicator',
'swift-object-replicator']
else:
services += ['swift-account-server',
'swift-container-server',
'swift-object-server']
with self.pause_resume(services):
logging.info("Testing pause resume")
+7 -12
@@ -16,11 +16,12 @@
import jinja2
import urllib.parse
import subprocess
import os
import zaza.utilities.deployment_env as deployment_env
import zaza.openstack.utilities.juju as juju_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.tempest.utils as tempest_utils
import zaza.openstack.charm_tests.glance.setup as glance_setup
SETUP_ENV_VARS = {
@@ -279,21 +280,15 @@ def setup_tempest(tempest_template, accounts_template):
:returns: None
:rtype: None
"""
try:
subprocess.check_call(['tempest', 'workspace', 'remove', '--rmdir',
'--name', 'tempest-workspace'])
except subprocess.CalledProcessError:
pass
try:
subprocess.check_call(['tempest', 'init', 'tempest-workspace'])
except subprocess.CalledProcessError:
pass
workspace_name, workspace_path = tempest_utils.get_workspace()
tempest_utils.destroy_workspace(workspace_name, workspace_path)
tempest_utils.init_workspace(workspace_path)
render_tempest_config(
'tempest-workspace/etc/tempest.conf',
os.path.join(workspace_path, 'etc/tempest.conf'),
get_tempest_context(),
tempest_template)
render_tempest_config(
'tempest-workspace/etc/accounts.yaml',
os.path.join(workspace_path, 'etc/accounts.yaml'),
get_tempest_context(),
accounts_template)
@@ -34,7 +34,7 @@ attach_encrypted_volume = false
{% if 'keystone' in enabled_services %}
[identity]
uri = {proto}://{{ keystone }}:5000/v2.0
uri = {{ proto }}://{{ keystone }}:5000/v2.0
auth_version = v2
admin_role = Admin
region = RegionOne
+14 -6
@@ -20,6 +20,7 @@ import subprocess
import zaza
import zaza.charm_lifecycle.utils
import zaza.charm_lifecycle.test
import zaza.openstack.charm_tests.tempest.utils as tempest_utils
import tempfile
@@ -33,21 +34,24 @@ class TempestTest():
Test keys are parsed from ['tests_options']['tempest']['model'], where
valid test keys are: smoke (bool), whitelist (list of tests), blacklist
(list of tests), and regex (list of regex's).
(list of tests), regex (list of regexes), and keep-workspace (bool).
:returns: Status of tempest run
:rtype: bool
"""
result = True
charm_config = zaza.charm_lifecycle.utils.get_charm_config()
workspace_name, workspace_path = tempest_utils.get_workspace()
tempest_options = ['tempest', 'run', '--workspace',
'tempest-workspace', '--config',
'tempest-workspace/etc/tempest.conf']
workspace_name, '--config',
os.path.join(workspace_path, 'etc/tempest.conf')]
for model_alias in zaza.model.get_juju_model_aliases().keys():
tempest_test_key = model_alias
if model_alias == zaza.charm_lifecycle.utils.DEFAULT_MODEL_ALIAS:
tempest_test_key = 'default'
config = charm_config['tests_options']['tempest'][tempest_test_key]
if config.get('smoke'):
smoke = config.get('smoke')
if smoke and smoke is True:
tempest_options.extend(['--smoke'])
if config.get('regex'):
tempest_options.extend(
@@ -74,5 +78,9 @@ class TempestTest():
try:
subprocess.check_call(tempest_options)
except subprocess.CalledProcessError:
return False
return True
result = False
break
keep_workspace = config.get('keep-workspace')
if not keep_workspace or keep_workspace is not True:
tempest_utils.destroy_workspace(workspace_name, workspace_path)
return result
@@ -0,0 +1,67 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility code for working with tempest workspaces."""
import os
from pathlib import Path
import shutil
import subprocess
import zaza.model as model
def get_workspace():
"""Get tempest workspace name and path.
:returns: A tuple containing tempest workspace name and workspace path
:rtype: Tuple[str, str]
"""
home = str(Path.home())
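# One tempest workspace per Juju model, kept under ~/.tempest/<model name>.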
workspace_name = model.get_juju_model()
workspace_path = os.path.join(home, '.tempest', workspace_name)
return (workspace_name, workspace_path)
def destroy_workspace(workspace_name, workspace_path):
"""Delete tempest workspace.
:param workspace_name: name of workspace
:type workspace_name: str
:param workspace_path: directory path where workspace is stored
:type workspace_path: str
:returns: None
:rtype: None
"""
try:
subprocess.check_call(['tempest', 'workspace', 'remove', '--rmdir',
'--name', workspace_name])
except (subprocess.CalledProcessError, FileNotFoundError):
pass
if os.path.isdir(workspace_path):
shutil.rmtree(workspace_path)
def init_workspace(workspace_path):
"""Initialize tempest workspace.
:param workspace_path: directory path where workspace is stored
:type workspace_path: str
:returns: None
:rtype: None
"""
try:
subprocess.check_call(['tempest', 'init', workspace_path])
except subprocess.CalledProcessError:
pass
-18
@@ -14,7 +14,6 @@
"""Module containing base class for implementing charm tests."""
import contextlib
import logging
import ipaddress
import subprocess
import tenacity
import unittest
@@ -541,20 +540,3 @@ class OpenStackBaseTest(BaseCharmTest):
instance_2 = self.retrieve_guest(
'{}-ins-1'.format(self.RESOURCE_PREFIX))
return instance_1, instance_2
def format_addr(addr):
"""Validate and format IP address.
:param addr: IPv6 or IPv4 address
:type addr: str
:returns: Address string, optionally encapsulated in brackets([])
:rtype: str
:raises: ValueError
"""
ipaddr = ipaddress.ip_address(addr)
if isinstance(ipaddr, ipaddress.IPv6Address):
fmt = '[{}]'
else:
fmt = '{}'
return fmt.format(ipaddr)
+48 -11
@@ -21,15 +21,16 @@ import tenacity
import zaza.model as zaza_model
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.juju as juju_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.glance.setup as glance_setup
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.configure.guest as guest_utils
import zaza.openstack.utilities.openstack as openstack_utils
from zaza.utilities import juju as juju_utils
def _resource_reaches_status(
unit, auth_args, command, resource_id, target_status
unit, auth_args, status_command, full_status_command, resource_id,
target_status
):
"""Wait for a workload resource to reach a status.
@@ -37,8 +38,12 @@ def _resource_reaches_status(
:type unit: zaza_model.Unit
:param auth_args: authentication arguments for command
:type auth_args: str
:param command: command to execute
:type command: str
:param status_command: command to execute to get the resource status that
is expected to reach target_status
:type status_command: str
:param full_status_command: command to execute to get insights on why the
resource failed to reach target_status
:type full_status_command: str
:param resource_id: resource ID to monitor
:type resource_id: str
:param target_status: status to monitor for
@@ -47,7 +52,7 @@ def _resource_reaches_status(
resource_status = (
juju_utils.remote_run(
unit,
remote_cmd=command.format(
remote_cmd=status_command.format(
auth_args=auth_args, resource_id=resource_id
),
timeout=180,
@@ -63,7 +68,20 @@ def _resource_reaches_status(
)
if resource_status == target_status:
return
raise Exception("Resource not ready: {}".format(resource_status))
full_resource_status = (
juju_utils.remote_run(
unit,
remote_cmd=full_status_command.format(
auth_args=auth_args, resource_id=resource_id
),
timeout=180,
fatal=True,
)
.strip()
)
raise Exception("Resource not ready:\n{}".format(full_resource_status))
class WorkloadmgrCLIHelper(object):
@@ -78,7 +96,12 @@ class WorkloadmgrCLIHelper(object):
WORKLOAD_STATUS_CMD = (
"openstack {auth_args} workload show "
"-f value -c status "
" {resource_id} "
"{resource_id}"
)
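# The *_FULL_STATUS_CMD variants run the same 'show' without the status
# filter, to capture context when a resource fails to reach the target
# status.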
WORKLOAD_FULL_STATUS_CMD = (
"openstack {auth_args} workload show "
"{resource_id}"
)
SNAPSHOT_CMD = (
@@ -94,7 +117,12 @@ class WorkloadmgrCLIHelper(object):
SNAPSHOT_STATUS_CMD = (
"openstack {auth_args} workload snapshot show "
"-f value -c status "
"{resource_id} "
"{resource_id}"
)
SNAPSHOT_FULL_STATUS_CMD = (
"openstack {auth_args} workload snapshot show "
"{resource_id}"
)
ONECLICK_RESTORE_CMD = (
@@ -110,7 +138,13 @@ class WorkloadmgrCLIHelper(object):
RESTORE_STATUS_CMD = (
"openstack {auth_args} workloadmgr restore show "
"-f value -c status {resource_id}"
"-f value -c status "
"{resource_id}"
)
RESTORE_FULL_STATUS_CMD = (
"openstack {auth_args} workloadmgr restore show "
"{resource_id}"
)
def __init__(self, keystone_client):
@@ -193,6 +227,7 @@ class WorkloadmgrCLIHelper(object):
self.trilio_wlm_unit,
self.auth_args,
self.WORKLOAD_STATUS_CMD,
self.WORKLOAD_FULL_STATUS_CMD,
workload_id,
"available",
)
@@ -235,6 +270,7 @@ class WorkloadmgrCLIHelper(object):
self.trilio_wlm_unit,
self.auth_args,
self.SNAPSHOT_STATUS_CMD,
self.SNAPSHOT_FULL_STATUS_CMD,
snapshot_id,
"available",
)
@@ -275,6 +311,7 @@ class WorkloadmgrCLIHelper(object):
self.trilio_wlm_unit,
self.auth_args,
self.RESTORE_STATUS_CMD,
self.RESTORE_FULL_STATUS_CMD,
restore_id,
"available",
)
@@ -51,6 +51,10 @@ class BaseVaultTest(test_utils.OpenStackBaseTest):
vault_utils.auth_all(cls.clients, cls.vault_creds['root_token'])
vault_utils.ensure_secret_backend(cls.clients[0])
def tearDown(self):
"""Tun test cleanup for Vault tests."""
vault_utils.unseal_all(self.clients, self.vault_creds['keys'][0])
@contextlib.contextmanager
def pause_resume(self, services, pgrep_full=False):
"""Override pause_resume for Vault behavior."""
+2 -2
@@ -27,7 +27,7 @@ import yaml
import collections
import zaza.model
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.utilities.networking as network_utils
AUTH_FILE = "vault_tests.yaml"
CharmVaultClient = collections.namedtuple(
@@ -102,7 +102,7 @@ def get_unit_api_url(ip):
transport = 'http'
if vault_config['ssl-cert']['value']:
transport = 'https'
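# format_addr wraps IPv6 addresses in brackets so the resulting URL is
# valid.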
return '{}://{}:8200'.format(transport, test_utils.format_addr(ip))
return '{}://{}:8200'.format(transport, network_utils.format_addr(ip))
def get_hvac_client(vault_url, cacert=None):
+38
@@ -5,6 +5,11 @@ import logging
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.model as zaza_model
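# Pool type names used in charm config, and the numeric type codes that
# 'ceph osd pool ls detail -f json' reports (1 = replicated, 3 = erasure).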
REPLICATED_POOL_TYPE = 'replicated'
ERASURE_POOL_TYPE = 'erasure-coded'
REPLICATED_POOL_CODE = 1
ERASURE_POOL_CODE = 3
def get_expected_pools(radosgw=False):
"""Get expected ceph pools.
@@ -97,6 +102,39 @@ def get_ceph_pools(unit_name, model_name=None):
return pools
def get_ceph_pool_details(query_leader=True, unit_name=None, model_name=None):
"""Get ceph pool details.
Return a list of ceph pools details dicts.
:param query_leader: Whether to query the leader for pool details.
:type query_leader: bool
:param unit_name: Name of unit to get the pools on if query_leader is False
:type unit_name: string
:param model_name: Name of model to operate in
:type model_name: str
:returns: List of ceph pool detail dicts
:rtype: List[Dict]
:raise: zaza_model.CommandRunFailed
"""
cmd = 'sudo ceph osd pool ls detail -f json'
if query_leader and unit_name:
raise ValueError("Cannot set query_leader and unit_name")
if query_leader:
result = zaza_model.run_on_leader(
'ceph-mon',
cmd,
model_name=model_name)
else:
result = zaza_model.run_on_unit(
unit_name,
cmd,
model_name=model_name)
if int(result.get('Code')) != 0:
raise zaza_model.CommandRunFailed(cmd, result)
return json.loads(result.get('Stdout'))
def get_ceph_df(unit_name, model_name=None):
"""Return dict of ceph df json output, including ceph pool state.
+12
@@ -68,6 +68,8 @@ from zaza.openstack.utilities import (
exceptions,
generic as generic_utils,
)
import zaza.utilities.networking as network_utils
CIRROS_RELEASE_URL = 'http://download.cirros-cloud.net/version/released'
CIRROS_IMAGE_URL = 'http://download.cirros-cloud.net'
@@ -1704,6 +1706,7 @@ def get_overcloud_auth(address=None, model_name=None):
if not address:
address = get_keystone_ip(model_name=model_name)
address = network_utils.format_addr(address)
password = juju_utils.leader_get(
'keystone',
@@ -1737,6 +1740,15 @@ def get_overcloud_auth(address=None, model_name=None):
}
if tls_rid:
unit = model.get_first_unit_name('keystone', model_name=model_name)
# ensure that the path to put the local cacert in actually exists. The
# assumption that 'tests/' exists for, say, mojo is false.
# Needed due to:
# commit: 537473ad3addeaa3d1e4e2d0fd556aeaa4018eb2
_dir = os.path.dirname(KEYSTONE_LOCAL_CACERT)
if not os.path.exists(_dir):
os.makedirs(_dir)
model.scp_from_unit(
unit,
KEYSTONE_REMOTE_CACERT,
+35 -3
@@ -14,11 +14,13 @@
"""Collection of functions for testing series upgrade."""
import asyncio
import collections
import copy
import concurrent
import logging
import os
import time
from zaza import model
from zaza.charm_lifecycle import utils as cl_utils
@@ -642,14 +644,14 @@ def series_upgrade(unit_name, machine_num,
model.block_until_unit_wl_status(unit_name, "blocked")
logging.info("Waiting for model idleness")
model.block_until_all_units_idle()
logging.info("Complete series upgrade on {}".format(machine_num))
model.complete_series_upgrade(machine_num)
model.block_until_all_units_idle()
logging.info("Set origin on {}".format(application))
# Allow for charms which have neither source nor openstack-origin
if origin:
os_utils.set_origin(application, origin)
model.block_until_all_units_idle()
logging.info("Complete series upgrade on {}".format(machine_num))
model.complete_series_upgrade(machine_num)
model.block_until_all_units_idle()
logging.info("Running run_post_upgrade_functions {}".format(
post_upgrade_functions))
run_post_upgrade_functions(post_upgrade_functions)
@@ -882,6 +884,21 @@ def dist_upgrade(unit_name):
"""-o "Dpkg::Options::=--force-confdef" """
"""-o "Dpkg::Options::=--force-confold" dist-upgrade""")
model.run_on_unit(unit_name, dist_upgrade_cmd)
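# On Ubuntu, /var/run/reboot-required contains "*** System restart
# required ***" when a reboot is needed after package upgrades.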
rdict = model.run_on_unit(unit_name, "cat /var/run/reboot-required")
if "Stdout" in rdict and "restart" in rdict["Stdout"].lower():
logging.info("dist-upgrade required reboot {}".format(unit_name))
os_utils.reboot(unit_name)
logging.info("Waiting for workload status 'unknown' on {}"
.format(unit_name))
model.block_until_unit_wl_status(unit_name, "unknown")
logging.info("Waiting for workload status to return to normal on {}"
.format(unit_name))
model.block_until_unit_wl_status(
unit_name, "unknown", negate_match=True)
logging.info("Waiting for model idleness")
# pause for a bit
time.sleep(5.0)
model.block_until_all_units_idle()
async def async_dist_upgrade(unit_name):
@@ -902,6 +919,21 @@ async def async_dist_upgrade(unit_name):
"""-o "Dpkg::Options::=--force-confdef" """
"""-o "Dpkg::Options::=--force-confold" dist-upgrade""")
await model.async_run_on_unit(unit_name, dist_upgrade_cmd)
rdict = await model.async_run_on_unit(unit_name,
"cat /var/run/reboot-required")
if "Stdout" in rdict and "restart" in rdict["Stdout"].lower():
logging.info("dist-upgrade required reboot {}".format(unit_name))
await os_utils.async_reboot(unit_name)
logging.info("Waiting for workload status 'unknown' on {}"
.format(unit_name))
await model.async_block_until_unit_wl_status(unit_name, "unknown")
logging.info("Waiting for workload status to return to normal on {}"
.format(unit_name))
await model.async_block_until_unit_wl_status(
unit_name, "unknown", negate_match=True)
logging.info("Waiting for model idleness")
await asyncio.sleep(5.0)
await model.async_block_until_all_units_idle()
def do_release_upgrade(unit_name):
+41 -16
@@ -13,15 +13,17 @@
# limitations under the License.
"""Collection of functions to support upgrade testing."""
import re
import itertools
import logging
import collections
import re
import zaza.model
SERVICE_GROUPS = collections.OrderedDict([
('Stateful Services', ['percona-cluster', 'rabbitmq-server', 'ceph-mon',
'mysql-innodb-cluster']),
SERVICE_GROUPS = (
('Database Services', ['percona-cluster', 'mysql-innodb-cluster']),
('Stateful Services', ['rabbitmq-server', 'ceph-mon']),
('Core Identity', ['keystone']),
('Control Plane', [
'aodh', 'barbican', 'ceilometer', 'ceph-fs',
@@ -31,8 +33,7 @@ SERVICE_GROUPS = collections.OrderedDict([
'nova-cloud-controller', 'openstack-dashboard']),
('Data Plane', [
'nova-compute', 'ceph-osd',
'swift-proxy', 'swift-storage'])
])
'swift-proxy', 'swift-storage']))
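# NOTE: a plain tuple of (group name, charm list) pairs keeps the upgrade
# ordering explicit; consumers iterate the pairs directly instead of
# calling .items() on an OrderedDict.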
UPGRADE_EXCLUDE_LIST = ['rabbitmq-server', 'percona-cluster']
@@ -106,6 +107,30 @@ def _apply_extra_filters(filters, extra_filters):
return filters
def _filter_easyrsa(app, app_config, model_name=None):
charm_name = extract_charm_name_from_url(app_config['charm'])
if "easyrsa" in charm_name:
logging.warn("Skipping upgrade of easyrsa Bug #1850121")
return True
return False
def _filter_etcd(app, app_config, model_name=None):
charm_name = extract_charm_name_from_url(app_config['charm'])
if "etcd" in charm_name:
logging.warn("Skipping upgrade of easyrsa Bug #1850124")
return True
return False
def _filter_memcached(app, app_config, model_name=None):
charm_name = extract_charm_name_from_url(app_config['charm'])
if "memcached" in charm_name:
logging.warn("Skipping upgrade of memcached charm")
return True
return False
def get_upgrade_groups(model_name=None, extra_filters=None):
"""Place apps in the model into their upgrade groups.
@@ -170,21 +195,21 @@ def get_charm_upgrade_groups(model_name=None, extra_filters=None):
def _build_service_groups(applications):
groups = collections.OrderedDict()
for phase_name, charms in SERVICE_GROUPS.items():
groups = []
for phase_name, charms in SERVICE_GROUPS:
group = []
for app, app_config in applications.items():
charm_name = extract_charm_name_from_url(app_config['charm'])
if charm_name in charms:
group.append(app)
groups[phase_name] = group
groups.append((phase_name, group))
sweep_up = []
for app in applications:
if not (app in [a for group in groups.values() for a in group]):
sweep_up.append(app)
groups['sweep_up'] = sweep_up
for name, group in groups.items():
# collect all the values into a list, and then a lookup hash
values = list(itertools.chain(*(ls for _, ls in groups)))
vhash = {v: 1 for v in values}
sweep_up = [app for app in applications if app not in vhash]
groups.append(('sweep_up', sweep_up))
for name, group in groups:
group.sort()
return groups