Merge branch 'master' into retry-with-tenacity

Change-Id: Ifac26ffb6d53d75b9dbd314693252ad8b3364c5d
This commit is contained in:
Aurelien Lourot
2021-07-16 11:52:26 +02:00
169 changed files with 24309 additions and 1947 deletions

27
.github/workflows/tox.yaml vendored Normal file
View File

@@ -0,0 +1,27 @@
name: Python package
on:
- push
- pull_request
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.5, 3.6, 3.7, 3.8, 3.9]
steps:
- uses: actions/checkout@v1
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install tox tox-gh-actions
- name: Lint with tox
run: tox -e pep8
- name: Test with tox
run: tox -e py${{ matrix.python-version }}

View File

@@ -1,7 +1,9 @@
sudo: true
dist: xenial
language: python
install: pip install tox-travis
install:
- pip install tox-travis
- pip install codecov
matrix:
include:
- name: "Python 3.5"
@@ -13,5 +15,10 @@ matrix:
- name: "Python 3.7"
python: 3.7
env: ENV=pep8,py3
- name: "Python 3.8"
python: 3.7
env: ENV=pep8,py3
script:
- tox -c tox.ini -e $ENV
after_success:
- codecov --verbose --gcov-glob unit_tests/*

1
MANIFEST.in Normal file
View File

@@ -0,0 +1 @@
recursive-include zaza/openstack *.j2

View File

@@ -11,7 +11,7 @@ charm_name: pacemaker-remote
tests:
- zaza.openstack.charm_tests.pacemaker_remote.tests.PacemakerRemoteTest
configure:
- zaza.openstack.charm_tests.noop.setup.basic_setup
- zaza.charm_tests.noop.setup.basic_setup
gate_bundles:
- basic
smoke_bundles:
@@ -23,4 +23,4 @@ test-requirements.txt:
```
git+https://github.com/openstack-charmers/zaza.git#egg=zaza
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
```
```

2
codecov.yml Normal file
View File

@@ -0,0 +1,2 @@
ignore:
- "zaza/openstack/charm_tests/**/*tests.py"

View File

@@ -1,10 +1,14 @@
# pin lxml < 4.6.3 for py35 as no wheels exist for 4.6.3 (deprecated platform)
# This is necessary for Xenial builders
# BUG: https://github.com/openstack-charmers/zaza-openstack-tests/issues/530
lxml<4.6.3
aiounittest
async_generator
juju
boto3
juju!=2.8.3 # blacklist 2.8.3 as it appears to have a connection bug
juju_wait
PyYAML<=4.2,>=3.0
flake8>=2.2.4,<=3.5.0
PyYAML<=4.2,>=3.0
flake8>=2.2.4
flake8-docstrings
flake8-per-file-ignores
pydocstyle<4.0.0
@@ -21,12 +25,17 @@ dnspython>=1.12.0
psutil>=1.1.1,<2.0.0
python-openstackclient>=3.14.0
aodhclient
gnocchiclient>=7.0.5,<8.0.0
pika>=1.1.0,<2.0.0
python-barbicanclient
python-designateclient
python-ceilometerclient
python-cinderclient
python-glanceclient
python-heatclient
python-ironicclient
python-keystoneclient
python-manilaclient
python-neutronclient
python-novaclient
python-octaviaclient
@@ -34,7 +43,13 @@ python-swiftclient
tenacity
distro-info
paramiko
# Documentation requirements
sphinx
sphinxcontrib-asyncio
git+https://github.com/openstack-charmers/zaza#egg=zaza
# Newer versions require a Rust compiler to build, see
# * https://github.com/openstack-charmers/zaza/issues/421
# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html
cryptography<3.4

View File

@@ -25,22 +25,40 @@ from setuptools.command.test import test as TestCommand
version = "0.0.1.dev1"
install_require = [
'futurist<2.0.0',
'async_generator',
'cryptography',
'boto3',
# Newer versions require a Rust compiler to build, see
# * https://github.com/openstack-charmers/zaza/issues/421
# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html
'cryptography<3.4',
'dnspython',
'hvac<0.7.0',
'jinja2',
'juju',
'juju-wait',
'lxml',
'PyYAML',
'tenacity',
'oslo.config',
'python-glanceclient',
'python-keystoneclient',
'python-novaclient',
'python-neutronclient',
'python-octaviaclient',
'python-cinderclient',
'python-swiftclient',
'oslo.config<6.12.0',
'aodhclient<1.4.0',
'gnocchiclient>=7.0.5,<8.0.0',
'pika>=1.1.0,<2.0.0',
'python-barbicanclient>=4.0.1,<5.0.0',
'python-designateclient>=1.5,<3.0.0',
'python-heatclient<2.0.0',
'python-ironicclient',
'python-glanceclient<3.0.0',
'python-keystoneclient<3.22.0',
'python-manilaclient<2.0.0',
'python-novaclient<16.0.0',
'python-neutronclient<7.0.0',
'python-octaviaclient<1.11.0',
'python-ceilometerclient',
'python-cinderclient<6.0.0',
'python-swiftclient<3.9.0',
'zaza@git+https://github.com/openstack-charmers/zaza.git#egg=zaza',
]
@@ -96,10 +114,11 @@ setup(
license='Apache-2.0: http://www.apache.org/licenses/LICENSE-2.0',
packages=find_packages(exclude=["unit_tests"]),
zip_safe=False,
include_package_data=True,
cmdclass={'test': Tox},
install_requires=install_require,
extras_require={
'testing': tests_require,
},
tests_require=tests_require,
)
)

View File

@@ -1,2 +0,0 @@
This directory contains functional test definition for functional test of Zaza
itself.

View File

@@ -1,5 +0,0 @@
series: bionic
applications:
magpie:
charm: cs:~admcleod/magpie
num_units: 2

View File

@@ -1 +0,0 @@
comment: this bundle overlay intentionally left blank

View File

@@ -1,11 +0,0 @@
charm_name: none
gate_bundles:
- magpie
target_deploy_status:
magpie:
workload-status: active
workload-status-message: icmp ok, local hostname ok, dns ok
configure:
- zaza.openstack.charm_tests.noop.setup.basic_setup
tests:
- zaza.openstack.charm_tests.noop.tests.NoopTest

42
tox.ini
View File

@@ -1,6 +1,22 @@
[tox]
envlist = pep8, py3
skipsdist = True
# NOTE: Avoid build/test env pollution by not enabling sitepackages.
sitepackages = False
# NOTE: Avoid false positives by not skipping missing interpreters.
skip_missing_interpreters = False
# NOTES:
# * We avoid the new dependency resolver by pinning pip < 20.3, see
# https://github.com/pypa/pip/issues/9187
# * Pinning dependencies requires tox >= 3.2.0, see
# https://tox.readthedocs.io/en/latest/config.html#conf-requires
# * It is also necessary to pin virtualenv as a newer virtualenv would still
# lead to fetching the latest pip in the func* tox targets, see
# https://stackoverflow.com/a/38133283
requires = pip < 20.3
virtualenv < 20.0
# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci
minversion = 3.2.0
[testenv]
setenv = VIRTUAL_ENV={envdir}
@@ -8,12 +24,32 @@ setenv = VIRTUAL_ENV={envdir}
install_command =
pip install {opts} {packages}
commands = nosetests --with-coverage --cover-package=zaza {posargs} {toxinidir}/unit_tests
commands = nosetests --with-coverage --cover-package=zaza.openstack {posargs} {toxinidir}/unit_tests
[testenv:py3]
basepython = python3
deps = -r{toxinidir}/requirements.txt
[testenv:py3.5]
basepython = python3.5
deps = -r{toxinidir}/requirements.txt
[testenv:py3.6]
basepython = python3.6
deps = -r{toxinidir}/requirements.txt
[testenv:py3.7]
basepython = python3.7
deps = -r{toxinidir}/requirements.txt
[testenv:py3.8]
basepython = python3.8
deps = -r{toxinidir}/requirements.txt
[testenv:py3.9]
basepython = python3.9
deps = -r{toxinidir}/requirements.txt
[testenv:pep8]
basepython = python3
deps = -r{toxinidir}/requirements.txt
@@ -25,7 +61,7 @@ deps = -r{toxinidir}/requirements.txt
commands = /bin/true
[flake8]
ignore = E402,E226
ignore = E402,E226,W504
per-file-ignores =
unit_tests/**: D
@@ -34,4 +70,4 @@ basepython = python3
changedir = doc/source
deps =
-r{toxinidir}/requirements.txt
commands = sphinx-build -W -b html -d {toxinidir}/doc/build/doctrees . {toxinidir}/doc/build/html
commands = sphinx-build -W -b html -d {toxinidir}/doc/build/doctrees . {toxinidir}/doc/build/html

View File

@@ -11,3 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest.mock as mock
sys.modules['zaza.utilities.maas'] = mock.MagicMock()

View File

@@ -0,0 +1,13 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@@ -0,0 +1,40 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import mock
import unittest
import sys
import zaza.openstack.charm_tests.mysql.utils as mysql_utils
class TestMysqlUtils(unittest.TestCase):
"""Test class to encapsulate testing Mysql test utils."""
def setUp(self):
super(TestMysqlUtils, self).setUp()
if sys.version_info < (3, 6, 0):
raise unittest.SkipTest("Can't AsyncMock in py35")
@mock.patch.object(mysql_utils, 'model')
def test_mysql_complete_cluster_series_upgrade(self, mock_model):
run_action_on_leader = mock.AsyncMock()
mock_model.async_run_action_on_leader = run_action_on_leader
asyncio.get_event_loop().run_until_complete(
mysql_utils.complete_cluster_series_upgrade())
run_action_on_leader.assert_called_once_with(
'mysql',
'complete-cluster-series-upgrade',
action_params={})

View File

@@ -0,0 +1,40 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import mock
import unittest
import sys
import zaza.openstack.charm_tests.rabbitmq_server.utils as rabbit_utils
class TestRabbitUtils(unittest.TestCase):
"""Test class to encapsulate testing Mysql test utils."""
def setUp(self):
super(TestRabbitUtils, self).setUp()
if sys.version_info < (3, 6, 0):
raise unittest.SkipTest("Can't AsyncMock in py35")
@mock.patch.object(rabbit_utils.zaza, 'model')
def test_rabbit_complete_cluster_series_upgrade(self, mock_model):
run_action_on_leader = mock.AsyncMock()
mock_model.async_run_action_on_leader = run_action_on_leader
asyncio.get_event_loop().run_until_complete(
rabbit_utils.complete_cluster_series_upgrade())
run_action_on_leader.assert_called_once_with(
'rabbitmq-server',
'complete-cluster-series-upgrade',
action_params={})

View File

@@ -0,0 +1,66 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
import zaza.openstack.charm_tests.tempest.setup as tempest_setup
class TestTempestSetup(unittest.TestCase):
"""Test class to encapsulate testing Mysql test utils."""
def setUp(self):
super(TestTempestSetup, self).setUp()
def test_add_environment_var_config_with_missing_variable(self):
ctxt = {}
with self.assertRaises(Exception) as context:
tempest_setup.add_environment_var_config(ctxt, ['swift'])
self.assertEqual(
('Environment variables [TEST_SWIFT_IP] must all be '
'set to run this test'),
str(context.exception))
@mock.patch.object(tempest_setup.deployment_env, 'get_deployment_context')
def test_add_environment_var_config_with_all_variables(
self,
get_deployment_context):
ctxt = {}
get_deployment_context.return_value = {
'TEST_GATEWAY': 'test',
'TEST_CIDR_EXT': 'test',
'TEST_FIP_RANGE': 'test',
'TEST_NAME_SERVER': 'test',
'TEST_CIDR_PRIV': 'test',
}
tempest_setup.add_environment_var_config(ctxt, ['neutron'])
self.assertEqual(ctxt['test_gateway'], 'test')
@mock.patch.object(tempest_setup.deployment_env, 'get_deployment_context')
def test_add_environment_var_config_with_some_variables(
self,
get_deployment_context):
ctxt = {}
get_deployment_context.return_value = {
'TEST_GATEWAY': 'test',
'TEST_NAME_SERVER': 'test',
'TEST_CIDR_PRIV': 'test',
}
with self.assertRaises(Exception) as context:
tempest_setup.add_environment_var_config(ctxt, ['neutron'])
self.assertEqual(
('Environment variables [TEST_CIDR_EXT, TEST_FIP_RANGE] must '
'all be set to run this test'),
str(context.exception))

View File

@@ -0,0 +1,201 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import zaza.openstack.charm_tests.test_utils as test_utils
import unit_tests.utils as ut_utils
class TestBaseCharmTest(ut_utils.BaseTestCase):
def setUp(self):
super(TestBaseCharmTest, self).setUp()
self.target = test_utils.BaseCharmTest()
def patch_target(self, attr, return_value=None):
mocked = mock.patch.object(self.target, attr)
self._patches[attr] = mocked
started = mocked.start()
started.return_value = return_value
self._patches_start[attr] = started
setattr(self, attr, started)
def test_get_my_tests_options(self):
class FakeTest(test_utils.BaseCharmTest):
def method(self, test_config):
self.test_config = test_config
return self.get_my_tests_options('aKey', 'aDefault')
f = FakeTest()
self.assertEquals(f.method({}), 'aDefault')
self.assertEquals(f.method({
'tests_options': {
'unit_tests.charm_tests.test_utils.'
'FakeTest.method.aKey': 'aValue',
},
}), 'aValue')
def test_config_change(self):
default_config = {'fakeKey': 'testProvidedDefault'}
alterna_config = {'fakeKey': 'testProvidedAlterna'}
self.target.model_name = 'aModel'
self.target.test_config = {}
self.patch_target('config_current')
self.config_current.return_value = default_config
self.patch_object(test_utils.model, 'set_application_config')
self.patch_object(test_utils.model, 'wait_for_agent_status')
self.patch_object(test_utils.model, 'wait_for_application_states')
self.patch_object(test_utils.model, 'block_until_all_units_idle')
with self.target.config_change(
default_config, alterna_config, application_name='anApp'):
self.set_application_config.assert_called_once_with(
'anApp', alterna_config, model_name='aModel')
self.wait_for_agent_status.assert_called_once_with(
model_name='aModel')
self.wait_for_application_states.assert_called_once_with(
model_name='aModel', states={})
self.block_until_all_units_idle.assert_called_once_with()
# after yield we will have different calls than the above, measure both
self.set_application_config.assert_has_calls([
mock.call('anApp', alterna_config, model_name='aModel'),
mock.call('anApp', default_config, model_name='aModel'),
])
self.wait_for_application_states.assert_has_calls([
mock.call(model_name='aModel', states={}),
mock.call(model_name='aModel', states={}),
])
self.block_until_all_units_idle.assert_has_calls([
mock.call(),
mock.call(),
])
# confirm operation with `reset_to_charm_default`
self.set_application_config.reset_mock()
self.wait_for_agent_status.reset_mock()
self.wait_for_application_states.reset_mock()
self.patch_object(test_utils.model, 'reset_application_config')
with self.target.config_change(
default_config, alterna_config, application_name='anApp',
reset_to_charm_default=True):
self.set_application_config.assert_called_once_with(
'anApp', alterna_config, model_name='aModel')
# we want to assert this not to be called after yield
self.set_application_config.reset_mock()
self.assertFalse(self.set_application_config.called)
self.reset_application_config.assert_called_once_with(
'anApp', list(alterna_config.keys()), model_name='aModel')
self.wait_for_application_states.assert_has_calls([
mock.call(model_name='aModel', states={}),
mock.call(model_name='aModel', states={}),
])
self.block_until_all_units_idle.assert_has_calls([
mock.call(),
mock.call(),
])
# confirm operation where both default and alternate config passed in
# are the same. This is used to set config and not change it back.
self.set_application_config.reset_mock()
self.wait_for_agent_status.reset_mock()
self.wait_for_application_states.reset_mock()
self.reset_application_config.reset_mock()
with self.target.config_change(
alterna_config, alterna_config, application_name='anApp'):
self.set_application_config.assert_called_once_with(
'anApp', alterna_config, model_name='aModel')
# we want to assert these not to be called after yield
self.set_application_config.reset_mock()
self.wait_for_agent_status.reset_mock()
self.wait_for_application_states.reset_mock()
self.assertFalse(self.set_application_config.called)
self.assertFalse(self.reset_application_config.called)
self.assertFalse(self.wait_for_agent_status.called)
self.assertFalse(self.wait_for_application_states.called)
def test_separate_non_string_config(self):
intended_cfg_keys = ['foo2', 'foo3', 'foo4', 'foo5']
current_config_mock = {
'foo2': None,
'foo3': 'old_bar3',
'foo4': None,
'foo5': 'old_bar5',
}
self.patch_target('config_current')
self.config_current.return_value = current_config_mock
non_string_type_keys = ['foo2', 'foo3', 'foo4']
expected_result_filtered = {
'foo3': 'old_bar3',
'foo5': 'old_bar5',
}
expected_result_special = {
'foo2': None,
'foo4': None,
}
current, non_string = (
self.target.config_current_separate_non_string_type_keys(
non_string_type_keys, intended_cfg_keys, 'application_name')
)
self.assertEqual(expected_result_filtered, current)
self.assertEqual(expected_result_special, non_string)
self.config_current.assert_called_once_with(
'application_name', intended_cfg_keys)
def test_separate_special_config_None_params(self):
current_config_mock = {
'foo1': 'old_bar1',
'foo2': None,
'foo3': 'old_bar3',
'foo4': None,
'foo5': 'old_bar5',
}
self.patch_target('config_current')
self.config_current.return_value = current_config_mock
non_string_type_keys = ['foo2', 'foo3', 'foo4']
expected_result_filtered = {
'foo1': 'old_bar1',
'foo3': 'old_bar3',
'foo5': 'old_bar5',
}
expected_result_special = {
'foo2': None,
'foo4': None,
}
current, non_string = (
self.target.config_current_separate_non_string_type_keys(
non_string_type_keys)
)
self.assertEqual(expected_result_filtered, current)
self.assertEqual(expected_result_special, non_string)
self.config_current.assert_called_once_with(None, None)
class TestOpenStackBaseTest(ut_utils.BaseTestCase):
def test_setUpClass(self):
self.patch_object(test_utils.openstack_utils, 'get_cacert')
self.patch_object(test_utils.openstack_utils,
'get_overcloud_keystone_session')
self.patch_object(test_utils.BaseCharmTest, 'setUpClass')
class MyTestClass(test_utils.OpenStackBaseTest):
model_name = 'deadbeef'
MyTestClass.setUpClass('foo', 'bar')
self.setUpClass.assert_called_with('foo', 'bar')

View File

@@ -0,0 +1,69 @@
# flake8: noqa
SWIFT_GET_NODES_STDOUT = """
Account 23934cb1850c4d28b1ca113a24c0e46b
Container zaza-swift-gr-tests-f3129278-container
Object zaza_test_object.txt
Partition 146
Hash 928c2f8006efeeb4b1164f4cce035887
Server:Port Device 10.5.0.38:6000 loop0
Server:Port Device 10.5.0.4:6000 loop0
Server:Port Device 10.5.0.9:6000 loop0 [Handoff]
Server:Port Device 10.5.0.34:6000 loop0 [Handoff]
Server:Port Device 10.5.0.15:6000 loop0 [Handoff]
Server:Port Device 10.5.0.18:6000 loop0 [Handoff]
curl -g -I -XHEAD "http://10.5.0.38:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt"
curl -g -I -XHEAD "http://10.5.0.4:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt"
curl -g -I -XHEAD "http://10.5.0.9:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt" # [Handoff]
curl -g -I -XHEAD "http://10.5.0.34:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt" # [Handoff]
curl -g -I -XHEAD "http://10.5.0.15:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt" # [Handoff]
curl -g -I -XHEAD "http://10.5.0.18:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt" # [Handoff]
Use your own device location of servers:
such as "export DEVICE=/srv/node"
ssh 10.5.0.38 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887"
ssh 10.5.0.4 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887"
ssh 10.5.0.9 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887" # [Handoff]
ssh 10.5.0.34 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887" # [Handoff]
ssh 10.5.0.15 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887" # [Handoff]
ssh 10.5.0.18 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887" # [Handoff]
note: `/srv/node*` is used as default value of `devices`, the real value is set in the config file on each storage node.
"""
STORAGE_TOPOLOGY = {
'10.5.0.18': {
'app_name': 'swift-storage-region1-zone1',
'unit': "swift-storage-region1-zone1/0",
'region': 1,
'zone': 1},
'10.5.0.34': {
'app_name': 'swift-storage-region1-zone2',
'unit': "swift-storage-region1-zone2/0",
'region': 1,
'zone': 2},
'10.5.0.4': {
'app_name': 'swift-storage-region1-zone3',
'unit': "swift-storage-region1-zone3/0",
'region': 1,
'zone': 3},
'10.5.0.9': {
'app_name': 'swift-storage-region2-zone1',
'unit': "swift-storage-region2-zone1/0",
'region': 2,
'zone': 1},
'10.5.0.15': {
'app_name': 'swift-storage-region2-zone2',
'unit': "swift-storage-region2-zone2/0",
'region': 2, 'zone': 2},
'10.5.0.38': {
'app_name': 'swift-storage-region2-zone3',
'unit': "swift-storage-region2-zone3/0",
'region': 2,
'zone': 3}}

View File

@@ -0,0 +1,188 @@
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unit_tests.utils as ut_utils
import zaza.openstack.utilities as utilities
class SomeException(Exception):
pass
class SomeException2(Exception):
pass
class SomeException3(Exception):
pass
class TestObjectRetrierWraps(ut_utils.BaseTestCase):
def test_object_wrap(self):
class A:
def func(self, a, b=1):
return a + b
a = A()
wrapped_a = utilities.ObjectRetrierWraps(a)
self.assertEqual(wrapped_a.func(3), 4)
def test_object_multilevel_wrap(self):
class A:
def f1(self, a, b):
return a * b
class B:
@property
def f2(self):
return A()
b = B()
wrapped_b = utilities.ObjectRetrierWraps(b)
self.assertEqual(wrapped_b.f2.f1(5, 6), 30)
def test_object_wrap_number(self):
class A:
class_a = 5
def __init__(self):
self.instance_a = 10
def f1(self, a, b):
return a * b
a = A()
wrapped_a = utilities.ObjectRetrierWraps(a)
self.assertEqual(wrapped_a.class_a, 5)
self.assertEqual(wrapped_a.instance_a, 10)
@mock.patch("time.sleep")
def test_object_wrap_exception(self, mock_sleep):
class A:
def func(self):
raise SomeException()
a = A()
# retry on a specific exception
wrapped_a = utilities.ObjectRetrierWraps(
a, num_retries=1, retry_exceptions=[SomeException])
with self.assertRaises(SomeException):
wrapped_a.func()
mock_sleep.assert_called_once_with(5)
# also retry on any exception if none specified
wrapped_a = utilities.ObjectRetrierWraps(a, num_retries=1)
mock_sleep.reset_mock()
with self.assertRaises(SomeException):
wrapped_a.func()
mock_sleep.assert_called_once_with(5)
# no retry if exception isn't listed.
wrapped_a = utilities.ObjectRetrierWraps(
a, num_retries=1, retry_exceptions=[SomeException2])
mock_sleep.reset_mock()
with self.assertRaises(SomeException):
wrapped_a.func()
mock_sleep.assert_not_called()
@mock.patch("time.sleep")
def test_log_called(self, mock_sleep):
class A:
def func(self):
raise SomeException()
a = A()
mock_log = mock.Mock()
wrapped_a = utilities.ObjectRetrierWraps(
a, num_retries=1, log=mock_log)
with self.assertRaises(SomeException):
wrapped_a.func()
# there should be two calls; one for the single retry and one for the
# failure.
self.assertEqual(mock_log.call_count, 2)
@mock.patch("time.sleep")
def test_back_off_maximum(self, mock_sleep):
class A:
def func(self):
raise SomeException()
a = A()
wrapped_a = utilities.ObjectRetrierWraps(a, num_retries=3, backoff=2)
with self.assertRaises(SomeException):
wrapped_a.func()
# Note third call hits maximum wait time of 15.
mock_sleep.assert_has_calls([mock.call(5),
mock.call(10),
mock.call(15)])
@mock.patch("time.sleep")
def test_total_wait(self, mock_sleep):
class A:
def func(self):
raise SomeException()
a = A()
wrapped_a = utilities.ObjectRetrierWraps(
a, num_retries=3, total_wait=9)
with self.assertRaises(SomeException):
wrapped_a.func()
# Note only two calls, as total wait is 9, so a 3rd retry would exceed
# that.
mock_sleep.assert_has_calls([mock.call(5),
mock.call(5)])
@mock.patch("time.sleep")
def test_retry_on_connect_failure(self, mock_sleep):
class A:
def func1(self):
raise SomeException()
def func2(self):
raise utilities.ConnectFailure()
a = A()
wrapped_a = utilities.retry_on_connect_failure(a, num_retries=2)
with self.assertRaises(SomeException):
wrapped_a.func1()
mock_sleep.assert_not_called()
with self.assertRaises(utilities.ConnectFailure):
wrapped_a.func2()
mock_sleep.assert_has_calls([mock.call(5)])

View File

@@ -116,3 +116,20 @@ class TestCephUtils(ut_utils.BaseTestCase):
with self.assertRaises(model.CommandRunFailed):
ceph_utils.get_rbd_hash('aunit', 'apool', 'aimage',
model_name='amodel')
def test_pools_from_broker_req(self):
self.patch_object(ceph_utils.juju_utils, 'get_relation_from_unit')
self.get_relation_from_unit.return_value = {
'broker_req': (
'{"api-version": 1, "ops": ['
'{"op": "create-pool", "name": "cinder-ceph", '
'"compression-mode": null},'
'{"op": "create-pool", "name": "cinder-ceph", '
'"compression-mode": "aggressive"}]}'),
}
self.assertEquals(
ceph_utils.get_pools_from_broker_req(
'anApplication', 'aModelName'),
['cinder-ceph'])
self.get_relation_from_unit.assert_called_once_with(
'ceph-mon', 'anApplication', None, model_name='aModelName')

View File

@@ -125,23 +125,30 @@ class TestGenericUtils(ut_utils.BaseTestCase):
return _env.get(key)
self.get.side_effect = _get_env
# OSCI backward compatible env vars
# Prefered OSCI TEST_ env vars
_env = {"NET_ID": "netid",
"NAMESERVER": "10.0.0.10",
"NAME_SERVER": "10.0.0.10",
"GATEWAY": "10.0.0.1",
"CIDR_EXT": "10.0.0.0/24",
"FIP_RANGE": "10.0.200.0:10.0.200.254"}
"FIP_RANGE": "10.0.200.0:10.0.200.254",
"TEST_NET_ID": "test_netid",
"TEST_NAME_SERVER": "10.9.0.10",
"TEST_GATEWAY": "10.9.0.1",
"TEST_CIDR_EXT": "10.9.0.0/24",
"TEST_FIP_RANGE": "10.9.200.0:10.0.200.254"}
_expected_result = {}
_expected_result["net_id"] = _env["NET_ID"]
_expected_result["external_dns"] = _env["NAMESERVER"]
_expected_result["default_gateway"] = _env["GATEWAY"]
_expected_result["external_net_cidr"] = _env["CIDR_EXT"]
_expected_result["start_floating_ip"] = _env["FIP_RANGE"].split(":")[0]
_expected_result["end_floating_ip"] = _env["FIP_RANGE"].split(":")[1]
_expected_result["net_id"] = _env["TEST_NET_ID"]
_expected_result["external_dns"] = _env["TEST_NAME_SERVER"]
_expected_result["default_gateway"] = _env["TEST_GATEWAY"]
_expected_result["external_net_cidr"] = _env["TEST_CIDR_EXT"]
_expected_result["start_floating_ip"] = _env[
"TEST_FIP_RANGE"].split(":")[0]
_expected_result["end_floating_ip"] = _env[
"TEST_FIP_RANGE"].split(":")[1]
self.assertEqual(generic_utils.get_undercloud_env_vars(),
_expected_result)
# Overriding configure.network named variables
# Overriding local configure.network named variables
_override = {"start_floating_ip": "10.100.50.0",
"end_floating_ip": "10.100.50.254",
"default_gateway": "10.100.0.1",
@@ -166,34 +173,6 @@ class TestGenericUtils(ut_utils.BaseTestCase):
_yaml_dict)
self._open.assert_called_once_with(_filename, "r")
def test_do_release_upgrade(self):
_unit = "app/2"
generic_utils.do_release_upgrade(_unit)
self.subprocess.check_call.assert_called_once_with(
['juju', 'ssh', _unit, 'sudo', 'DEBIAN_FRONTEND=noninteractive',
'do-release-upgrade', '-f', 'DistUpgradeViewNonInteractive'])
def test_wrap_do_release_upgrade(self):
self.patch_object(generic_utils, "do_release_upgrade")
self.patch_object(generic_utils, "run_via_ssh")
self.patch_object(generic_utils.model, "scp_to_unit")
_unit = "app/2"
_from_series = "xenial"
_to_series = "bionic"
_workaround_script = "scriptname"
_files = ["filename", _workaround_script]
_scp_calls = []
_run_calls = [
mock.call(_unit, _workaround_script)]
for filename in _files:
_scp_calls.append(mock.call(_unit, filename, filename))
generic_utils.wrap_do_release_upgrade(
_unit, to_series=_to_series, from_series=_from_series,
workaround_script=_workaround_script, files=_files)
self.scp_to_unit.assert_has_calls(_scp_calls)
self.run_via_ssh.assert_has_calls(_run_calls)
self.do_release_upgrade.assert_called_once_with(_unit)
def test_reboot(self):
_unit = "app/2"
generic_utils.reboot(_unit)
@@ -219,146 +198,6 @@ class TestGenericUtils(ut_utils.BaseTestCase):
self.set_application_config.assert_called_once_with(
_application, {_origin: _pocket})
def test_series_upgrade(self):
self.patch_object(generic_utils.model, "block_until_all_units_idle")
self.patch_object(generic_utils.model, "block_until_unit_wl_status")
self.patch_object(generic_utils.model, "prepare_series_upgrade")
self.patch_object(generic_utils.model, "complete_series_upgrade")
self.patch_object(generic_utils.model, "set_series")
self.patch_object(generic_utils, "set_origin")
self.patch_object(generic_utils, "wrap_do_release_upgrade")
self.patch_object(generic_utils, "reboot")
_unit = "app/2"
_application = "app"
_machine_num = "4"
_from_series = "xenial"
_to_series = "bionic"
_origin = "source"
_files = ["filename", "scriptname"]
_workaround_script = "scriptname"
generic_utils.series_upgrade(
_unit, _machine_num, origin=_origin,
to_series=_to_series, from_series=_from_series,
workaround_script=_workaround_script, files=_files)
self.block_until_all_units_idle.called_with()
self.prepare_series_upgrade.assert_called_once_with(
_machine_num, to_series=_to_series)
self.wrap_do_release_upgrade.assert_called_once_with(
_unit, to_series=_to_series, from_series=_from_series,
workaround_script=_workaround_script, files=_files)
self.complete_series_upgrade.assert_called_once_with(_machine_num)
self.set_series.assert_called_once_with(_application, _to_series)
self.set_origin.assert_called_once_with(_application, _origin)
self.reboot.assert_called_once_with(_unit)
def test_series_upgrade_application_pause_peers_and_subordinates(self):
    """series_upgrade_application pauses non-leader peers and subordinates.

    With both pause flags set, every non-leader peer unit and every
    subordinate of a non-leader should receive a "pause" action before
    the per-machine series upgrades run.
    """
    self.patch_object(generic_utils.model, "run_action")
    self.patch_object(generic_utils, "series_upgrade")
    _application = "app"
    _from_series = "xenial"
    _to_series = "bionic"
    _origin = "source"
    _files = ["filename", "scriptname"]
    _workaround_script = "scriptname"
    _completed_machines = []
    # Peers and Subordinates
    _run_action_calls = [
        mock.call("{}-hacluster/1".format(_application),
                  "pause", action_params={}),
        mock.call("{}/1".format(_application), "pause", action_params={}),
        mock.call("{}-hacluster/2".format(_application),
                  "pause", action_params={}),
        mock.call("{}/2".format(_application), "pause", action_params={}),
    ]
    _series_upgrade_calls = []
    for machine_num in ("0", "1", "2"):
        _series_upgrade_calls.append(
            mock.call("{}/{}".format(_application, machine_num),
                      machine_num, origin=_origin,
                      from_series=_from_series, to_series=_to_series,
                      workaround_script=_workaround_script, files=_files),
        )
    # Pause primary peers and subordinates
    # BUG FIX: the original statement ended in a stray trailing comma,
    # which turned the call into a discarded one-element tuple expression.
    generic_utils.series_upgrade_application(
        _application, origin=_origin,
        to_series=_to_series, from_series=_from_series,
        pause_non_leader_primary=True,
        pause_non_leader_subordinate=True,
        completed_machines=_completed_machines,
        workaround_script=_workaround_script, files=_files)
    self.run_action.assert_has_calls(_run_action_calls)
    self.series_upgrade.assert_has_calls(_series_upgrade_calls)
def test_series_upgrade_application_pause_subordinates(self):
    """series_upgrade_application pauses only subordinates when asked.

    With pause_non_leader_primary=False only the hacluster subordinates
    of the non-leader units should receive the "pause" action.
    """
    self.patch_object(generic_utils.model, "run_action")
    self.patch_object(generic_utils, "series_upgrade")
    _application = "app"
    _from_series = "xenial"
    _to_series = "bionic"
    _origin = "source"
    _files = ["filename", "scriptname"]
    _workaround_script = "scriptname"
    _completed_machines = []
    # Subordinates only
    _run_action_calls = [
        mock.call("{}-hacluster/1".format(_application),
                  "pause", action_params={}),
        mock.call("{}-hacluster/2".format(_application),
                  "pause", action_params={}),
    ]
    _series_upgrade_calls = []
    for machine_num in ("0", "1", "2"):
        _series_upgrade_calls.append(
            mock.call("{}/{}".format(_application, machine_num),
                      machine_num, origin=_origin,
                      from_series=_from_series, to_series=_to_series,
                      workaround_script=_workaround_script, files=_files),
        )
    # Pause subordinates
    # BUG FIX: the original statement ended in a stray trailing comma,
    # which turned the call into a discarded one-element tuple expression.
    generic_utils.series_upgrade_application(
        _application, origin=_origin,
        to_series=_to_series, from_series=_from_series,
        pause_non_leader_primary=False,
        pause_non_leader_subordinate=True,
        completed_machines=_completed_machines,
        workaround_script=_workaround_script, files=_files)
    self.run_action.assert_has_calls(_run_action_calls)
    self.series_upgrade.assert_has_calls(_series_upgrade_calls)
def test_series_upgrade_application_no_pause(self):
    """series_upgrade_application with both pause flags off runs no actions.

    No "pause" action may be dispatched; every machine still gets a
    series_upgrade call.
    """
    self.patch_object(generic_utils.model, "run_action")
    self.patch_object(generic_utils, "series_upgrade")
    _application = "app"
    _from_series = "xenial"
    _to_series = "bionic"
    _origin = "source"
    _series_upgrade_calls = []
    _files = ["filename", "scriptname"]
    _workaround_script = "scriptname"
    _completed_machines = []
    # One expected series_upgrade call per machine in the application.
    for machine_num in ("0", "1", "2"):
        _series_upgrade_calls.append(
            mock.call("{}/{}".format(_application, machine_num),
                      machine_num, origin=_origin,
                      from_series=_from_series, to_series=_to_series,
                      workaround_script=_workaround_script, files=_files),
        )
    # No pausing
    generic_utils.series_upgrade_application(
        _application, origin=_origin,
        to_series=_to_series, from_series=_from_series,
        pause_non_leader_primary=False,
        pause_non_leader_subordinate=False,
        completed_machines=_completed_machines,
        workaround_script=_workaround_script, files=_files)
    self.run_action.assert_not_called()
    self.series_upgrade.assert_has_calls(_series_upgrade_calls)
def test_set_dpkg_non_interactive_on_unit(self):
self.patch_object(generic_utils, "model")
_unit_name = "app/1"
@@ -561,3 +400,126 @@ class TestGenericUtils(ut_utils.BaseTestCase):
self.telnet.side_effect = generic_utils.socket.error
self.assertFalse(generic_utils.is_port_open(_port, _addr))
def test_get_unit_hostnames(self):
    """get_unit_hostnames maps unit ids to (optionally fqdn) hostnames."""
    self.patch(
        "zaza.openstack.utilities.generic.model.run_on_unit",
        new_callable=mock.MagicMock(),
        name="_run"
    )
    _unit1 = mock.MagicMock()
    _unit1.entity_id = "testunit/1"
    _unit2 = mock.MagicMock()
    _unit2.entity_id = "testunit/2"
    _hostname1 = "host1.domain"
    _hostname2 = "host2.domain"
    expected = {
        _unit1.entity_id: _hostname1,
        _unit2.entity_id: _hostname2,
    }
    _units = [_unit1, _unit2]
    # Short hostname: one "hostname" run per unit.
    self._run.side_effect = [{"Stdout": _hostname1},
                             {"Stdout": _hostname2}]
    actual = generic_utils.get_unit_hostnames(_units)
    self.assertEqual(actual, expected)
    expected_run_calls = [
        mock.call('testunit/1', 'hostname'),
        mock.call('testunit/2', 'hostname')]
    self._run.assert_has_calls(expected_run_calls)
    # FQDN variant: "hostname -f" must be run instead.
    self._run.reset_mock()
    self._run.side_effect = [{"Stdout": _hostname1},
                             {"Stdout": _hostname2}]
    expected_run_calls = [
        mock.call('testunit/1', 'hostname -f'),
        mock.call('testunit/2', 'hostname -f')]
    actual = generic_utils.get_unit_hostnames(_units, fqdn=True)
    self._run.assert_has_calls(expected_run_calls)
    # BUG FIX: the fqdn result was computed but never checked.
    self.assertEqual(actual, expected)
def test_port_knock_units(self):
    """port_knock_units returns None on success, a message on failure."""
    self.patch(
        "zaza.openstack.utilities.generic.is_port_open",
        new_callable=mock.MagicMock(),
        name="_is_port_open"
    )
    units = [mock.MagicMock() for _ in range(2)]
    # Every port open -> success (None), one probe per unit.
    self._is_port_open.side_effect = [True, True]
    self.assertIsNone(generic_utils.port_knock_units(units))
    self.assertEqual(self._is_port_open.call_count, len(units))
    # One closed port -> a non-None failure indication.
    self._is_port_open.side_effect = [True, False]
    self.assertIsNotNone(generic_utils.port_knock_units(units))
    # check when func is expecting failure, i.e. should succeed
    self._is_port_open.reset_mock()
    self._is_port_open.side_effect = [False, False]
    result = generic_utils.port_knock_units(units, expect_success=False)
    self.assertIsNone(result)
    self.assertEqual(self._is_port_open.call_count, len(units))
def test_check_commands_on_units(self):
    """check_commands_on_units runs every command on every unit.

    Success is signalled by a None return; any non-zero exit code from
    any unit/command pair yields a non-None result.
    """
    self.patch(
        "zaza.openstack.utilities.generic.model.run_on_unit",
        new_callable=mock.MagicMock(),
        name="_run"
    )
    num_units = 2
    _units = [mock.MagicMock() for i in range(num_units)]
    num_cmds = 3
    cmds = ["/usr/bin/fakecmd"] * num_cmds
    # Test success, all calls return 0
    # zero is a string to replicate run_on_unit return data type
    _cmd_results = [{"Code": "0"}] * len(_units) * len(cmds)
    self._run.side_effect = _cmd_results
    result = generic_utils.check_commands_on_units(cmds, _units)
    self.assertIsNone(result)
    # Every (unit, command) pair must have been executed.
    self.assertEqual(self._run.call_count, len(_units) * len(cmds))
    # Test failure, some call returns 1
    _cmd_results[2] = {"Code": "1"}
    self._run.side_effect = _cmd_results
    result = generic_utils.check_commands_on_units(cmds, _units)
    self.assertIsNotNone(result)
def test_systemctl(self):
    """systemctl runs /bin/systemctl on a unit and asserts success.

    Accepts either a Unit object or a unit-name string; a non-zero exit
    code must raise an AssertionError.
    """
    self.patch_object(generic_utils.model, "get_unit_from_name")
    self.patch_object(generic_utils.model, "run_on_unit")
    _unit = mock.MagicMock()
    _unit.entity_id = "unit/2"
    _command = "stop"
    _service = "servicename"
    _systemctl = "/bin/systemctl {} {}".format(_command, _service)
    self.run_on_unit.return_value = {"Code": 0}
    self.get_unit_from_name.return_value = _unit
    # With Unit object
    generic_utils.systemctl(_unit, _service, command=_command)
    self.run_on_unit.assert_called_with(_unit.entity_id, _systemctl)
    # With string name unit
    generic_utils.systemctl(_unit.entity_id, _service, command=_command)
    self.run_on_unit.assert_called_with(_unit.entity_id, _systemctl)
    # Failed return code
    self.run_on_unit.return_value = {"Code": 1}
    with self.assertRaises(AssertionError):
        generic_utils.systemctl(
            _unit.entity_id, _service, command=_command)

View File

@@ -1,271 +0,0 @@
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unit_tests.utils as ut_utils
from zaza.openstack.utilities import juju as juju_utils
class TestJujuUtils(ut_utils.BaseTestCase):
    """Unit tests for the zaza.openstack.utilities.juju helpers."""

    def setUp(self):
        """Build a canned juju status / model / controller mock fixture."""
        super(TestJujuUtils, self).setUp()
        # Juju Status Object and data
        self.key = "instance-id"
        self.key_data = "machine-uuid"
        self.machine = "1"
        self.machine_data = {self.key: self.key_data}
        self.unit = "app/1"
        self.unit_data = {"machine": self.machine}
        self.application = "app"
        self.application_data = {"units": {self.unit: self.unit_data}}
        self.subordinate_application = "subordinate_application"
        self.subordinate_application_data = {
            "subordinate-to": [self.application]}
        self.juju_status = mock.MagicMock()
        self.juju_status.name = "juju_status_object"
        self.juju_status.applications.get.return_value = self.application_data
        self.juju_status.machines.get.return_value = self.machine_data
        # Model
        self.patch_object(juju_utils, "model")
        self.model_name = "model-name"
        self.model.get_juju_model.return_value = self.model_name
        self.model.get_status.return_value = self.juju_status
        self.run_output = {"Code": "0", "Stderr": "", "Stdout": "RESULT"}
        self.error_run_output = {"Code": "1", "Stderr": "ERROR", "Stdout": ""}
        self.model.run_on_unit.return_value = self.run_output
        # Clouds
        self.cloud_name = "FakeCloudName"
        self.cloud_type = "FakeCloudType"
        self.clouds = {
            "clouds":
                {self.cloud_name:
                    {"type": self.cloud_type}}}
        # Controller
        self.patch_object(juju_utils, "controller")
        self.controller.get_cloud.return_value = self.cloud_name

    def test_get_application_status(self):
        """get_application_status filters full status by app or by unit."""
        self.patch_object(juju_utils, "get_full_juju_status")
        self.get_full_juju_status.return_value = self.juju_status
        # Full status juju object return
        self.assertEqual(
            juju_utils.get_application_status(), self.juju_status)
        self.get_full_juju_status.assert_called_once()
        # Application only dictionary return
        self.assertEqual(
            juju_utils.get_application_status(application=self.application),
            self.application_data)
        # Unit no application dictionary return
        self.assertEqual(
            juju_utils.get_application_status(unit=self.unit),
            self.unit_data)

    def test_get_cloud_configs(self):
        """get_cloud_configs returns all clouds or a single named cloud."""
        self.patch_object(juju_utils.Path, "home")
        self.patch_object(juju_utils.generic_utils, "get_yaml_config")
        self.get_yaml_config.return_value = self.clouds
        # All the cloud configs
        self.assertEqual(juju_utils.get_cloud_configs(), self.clouds)
        # With cloud specified
        self.assertEqual(juju_utils.get_cloud_configs(self.cloud_name),
                         self.clouds["clouds"][self.cloud_name])

    def test_get_full_juju_status(self):
        """get_full_juju_status proxies model.get_status."""
        self.assertEqual(juju_utils.get_full_juju_status(), self.juju_status)
        self.model.get_status.assert_called_once_with()

    def test_get_machines_for_application(self):
        """get_machines_for_application resolves machines for an app.

        A subordinate application (no units of its own) is followed to
        its principal via the "subordinate-to" key.
        """
        self.patch_object(juju_utils, "get_application_status")
        self.get_application_status.return_value = self.application_data
        # Machine data
        self.assertEqual(
            juju_utils.get_machines_for_application(self.application),
            [self.machine])
        self.get_application_status.assert_called_once()

        # Subordinate application has no units
        def _get_application_status(application):
            _apps = {
                self.application: self.application_data,
                self.subordinate_application:
                    self.subordinate_application_data}
            return _apps[application]
        self.get_application_status.side_effect = _get_application_status
        self.assertEqual(
            juju_utils.get_machines_for_application(
                self.subordinate_application),
            [self.machine])

    def test_get_unit_name_from_host_name(self):
        """get_unit_name_from_host_name maps juju-<model>-<id> to a unit."""
        unit_mock1 = mock.MagicMock()
        unit_mock1.data = {'machine-id': 12}
        unit_mock1.entity_id = 'myapp/2'
        unit_mock2 = mock.MagicMock()
        unit_mock2.data = {'machine-id': 15}
        unit_mock2.entity_id = 'myapp/5'
        self.model.get_units.return_value = [unit_mock1, unit_mock2]
        self.assertEqual(
            juju_utils.get_unit_name_from_host_name('juju-model-12', 'myapp'),
            'myapp/2')

    def test_get_machine_status(self):
        """get_machine_status returns full machine data or a single key."""
        self.patch_object(juju_utils, "get_full_juju_status")
        self.get_full_juju_status.return_value = self.juju_status
        # All machine data
        self.assertEqual(
            juju_utils.get_machine_status(self.machine),
            self.machine_data)
        self.get_full_juju_status.assert_called_once()
        # Request a specific key
        self.assertEqual(
            juju_utils.get_machine_status(self.machine, self.key),
            self.key_data)

    def test_get_machine_uuids_for_application(self):
        """get_machine_uuids_for_application maps machines to instance ids."""
        self.patch_object(juju_utils, "get_machines_for_application")
        self.get_machines_for_application.return_value = [self.machine]
        self.assertEqual(
            juju_utils.get_machine_uuids_for_application(self.application),
            [self.machine_data.get("instance-id")])
        self.get_machines_for_application.assert_called_once_with(
            self.application)

    def test_get_provider_type(self):
        """get_provider_type reads the current controller's cloud type."""
        self.patch_object(juju_utils, "get_cloud_configs")
        self.get_cloud_configs.return_value = {"type": self.cloud_type}
        self.assertEqual(juju_utils.get_provider_type(),
                         self.cloud_type)
        self.get_cloud_configs.assert_called_once_with(self.cloud_name)

    def test_remote_run(self):
        """remote_run returns stdout; failures honour the fatal flag."""
        _cmd = "do the thing"
        # Success
        self.assertEqual(juju_utils.remote_run(self.unit, _cmd),
                         self.run_output["Stdout"])
        self.model.run_on_unit.assert_called_once_with(
            self.unit, _cmd, timeout=None)
        # Non-fatal failure
        self.model.run_on_unit.return_value = self.error_run_output
        self.assertEqual(juju_utils.remote_run(self.unit, _cmd, fatal=False),
                         self.error_run_output["Stderr"])
        # Fatal failure
        with self.assertRaises(Exception):
            juju_utils.remote_run(self.unit, _cmd, fatal=True)

    def test_get_unit_names(self):
        """_get_unit_names passes fully-qualified unit names through."""
        self.patch('zaza.model.get_first_unit_name', new_callable=mock.Mock(),
                   name='_get_first_unit_name')
        juju_utils._get_unit_names(['aunit/0', 'otherunit/0'])
        self.assertFalse(self._get_first_unit_name.called)

    def test_get_unit_names_called_with_application_name(self):
        """_get_unit_names resolves a bare application name to a unit."""
        self.patch_object(juju_utils, 'model')
        juju_utils._get_unit_names(['aunit', 'otherunit/0'])
        self.model.get_first_unit_name.assert_called()

    def test_get_relation_from_unit(self):
        """get_relation_from_unit runs relation-get and parses the YAML."""
        self.patch_object(juju_utils, '_get_unit_names')
        self.patch_object(juju_utils, 'yaml')
        self.patch_object(juju_utils, 'model')
        self._get_unit_names.return_value = ['aunit/0', 'otherunit/0']
        data = {'foo': 'bar'}
        self.model.get_relation_id.return_value = 42
        self.model.run_on_unit.return_value = {'Code': 0, 'Stdout': str(data)}
        juju_utils.get_relation_from_unit('aunit/0', 'otherunit/0',
                                          'arelation')
        self.model.run_on_unit.assert_called_with(
            'aunit/0',
            'relation-get --format=yaml -r "42" - "otherunit/0"')
        self.yaml.safe_load.assert_called_with(str(data))

    def test_get_relation_from_unit_fails(self):
        """get_relation_from_unit raises when relation-get fails."""
        self.patch_object(juju_utils, '_get_unit_names')
        self.patch_object(juju_utils, 'yaml')
        self.patch_object(juju_utils, 'model')
        self._get_unit_names.return_value = ['aunit/0', 'otherunit/0']
        self.model.get_relation_id.return_value = 42
        self.model.run_on_unit.return_value = {'Code': 1, 'Stderr': 'ERROR'}
        with self.assertRaises(Exception):
            juju_utils.get_relation_from_unit('aunit/0', 'otherunit/0',
                                              'arelation')
        self.model.run_on_unit.assert_called_with(
            'aunit/0',
            'relation-get --format=yaml -r "42" - "otherunit/0"')
        # No output to parse on failure.
        self.assertFalse(self.yaml.safe_load.called)

    def test_leader_get(self):
        """leader_get fetches all leader settings as YAML."""
        self.patch_object(juju_utils, 'yaml')
        self.patch_object(juju_utils, 'model')
        data = {'foo': 'bar'}
        self.model.run_on_leader.return_value = {
            'Code': 0, 'Stdout': str(data)}
        juju_utils.leader_get('application')
        self.model.run_on_leader.assert_called_with(
            'application', 'leader-get --format=yaml ')
        self.yaml.safe_load.assert_called_with(str(data))

    def test_leader_get_key(self):
        """leader_get can fetch a single leader-settings key."""
        self.patch_object(juju_utils, 'yaml')
        self.patch_object(juju_utils, 'model')
        data = {'foo': 'bar'}
        self.model.run_on_leader.return_value = {
            'Code': 0, 'Stdout': data['foo']}
        juju_utils.leader_get('application', 'foo')
        self.model.run_on_leader.assert_called_with(
            'application', 'leader-get --format=yaml foo')
        self.yaml.safe_load.assert_called_with(data['foo'])

    def test_leader_get_fails(self):
        """leader_get raises when the leader-get command fails."""
        self.patch_object(juju_utils, 'yaml')
        self.patch_object(juju_utils, 'model')
        self.model.run_on_leader.return_value = {
            'Code': 1, 'Stderr': 'ERROR'}
        with self.assertRaises(Exception):
            juju_utils.leader_get('application')
        self.model.run_on_leader.assert_called_with(
            'application', 'leader-get --format=yaml ')
        # No output to parse on failure.
        self.assertFalse(self.yaml.safe_load.called)

    def test_get_machine_series(self):
        """get_machine_series asks get_machine_status for the series key."""
        self.patch(
            'zaza.openstack.utilities.juju.get_machine_status',
            new_callable=mock.MagicMock(),
            name='_get_machine_status'
        )
        self._get_machine_status.return_value = 'xenial'
        expected = 'xenial'
        actual = juju_utils.get_machine_series('6')
        self._get_machine_status.assert_called_with(
            machine='6',
            key='series'
        )
        self.assertEqual(expected, actual)

View File

@@ -16,6 +16,9 @@ import copy
import datetime
import io
import mock
import subprocess
import sys
import unittest
import tenacity
import unit_tests.utils as ut_utils
@@ -159,7 +162,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
# Already exists
network = openstack_utils.create_external_network(
self.neutronclient, self.project_id, False)
self.neutronclient, self.project_id)
self.assertEqual(network, self.network["network"])
self.neutronclient.create_network.assert_not_called()
@@ -169,7 +172,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
network_msg = copy.deepcopy(self.network)
network_msg["network"].pop("id")
network = openstack_utils.create_external_network(
self.neutronclient, self.project_id, False)
self.neutronclient, self.project_id)
self.assertEqual(network, self.network["network"])
self.neutronclient.create_network.assert_called_once_with(
network_msg)
@@ -190,6 +193,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.patch_object(openstack_utils, 'get_application_config_option')
self.patch_object(openstack_utils, 'get_keystone_ip')
self.patch_object(openstack_utils, "get_current_os_versions")
self.patch_object(openstack_utils, "get_remote_ca_cert_file")
self.patch_object(openstack_utils.juju_utils, 'leader_get')
if tls_relation:
self.patch_object(openstack_utils.model, "scp_from_unit")
@@ -203,6 +207,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.get_relation_id.return_value = None
self.get_application_config_option.return_value = None
self.leader_get.return_value = 'openstack'
self.get_remote_ca_cert_file.return_value = None
if tls_relation or ssl_cert:
port = 35357
transport = 'https'
@@ -244,7 +249,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
'API_VERSION': 3,
}
if tls_relation:
expect['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT
self.get_remote_ca_cert_file.return_value = '/tmp/a.cert'
expect['OS_CACERT'] = '/tmp/a.cert'
self.assertEqual(openstack_utils.get_overcloud_auth(),
expect)
@@ -288,12 +294,22 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
openstack_utils.get_undercloud_keystone_session()
self.get_keystone_session.assert_called_once_with(_auth, verify=None)
def test_get_nova_session_client(self):
session_mock = mock.MagicMock()
self.patch_object(openstack_utils.novaclient_client, "Client")
openstack_utils.get_nova_session_client(session_mock)
self.Client.assert_called_once_with(2, session=session_mock)
self.Client.reset_mock()
openstack_utils.get_nova_session_client(session_mock, version=2.56)
self.Client.assert_called_once_with(2.56, session=session_mock)
def test_get_urllib_opener(self):
self.patch_object(openstack_utils.urllib.request, "ProxyHandler")
self.patch_object(openstack_utils.urllib.request, "HTTPHandler")
self.patch_object(openstack_utils.urllib.request, "build_opener")
self.patch_object(openstack_utils.os, "getenv")
self.getenv.return_value = None
self.patch_object(openstack_utils.deployment_env,
"get_deployment_context",
return_value=dict(TEST_HTTP_PROXY=None))
HTTPHandler_mock = mock.MagicMock()
self.HTTPHandler.return_value = HTTPHandler_mock
openstack_utils.get_urllib_opener()
@@ -304,8 +320,9 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.patch_object(openstack_utils.urllib.request, "ProxyHandler")
self.patch_object(openstack_utils.urllib.request, "HTTPHandler")
self.patch_object(openstack_utils.urllib.request, "build_opener")
self.patch_object(openstack_utils.os, "getenv")
self.getenv.return_value = 'http://squidy'
self.patch_object(openstack_utils.deployment_env,
"get_deployment_context",
return_value=dict(TEST_HTTP_PROXY='http://squidy'))
ProxyHandler_mock = mock.MagicMock()
self.ProxyHandler.return_value = ProxyHandler_mock
openstack_utils.get_urllib_opener()
@@ -366,12 +383,15 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
'e01df65a')
def test__resource_reaches_status_bespoke(self):
client_mock = mock.MagicMock()
resource_mock = mock.MagicMock()
resource_mock.get.return_value = mock.MagicMock(status='readyish')
resource_mock.special_status = 'readyish'
client_mock.get.return_value = resource_mock
openstack_utils._resource_reaches_status(
resource_mock,
client_mock,
'e01df65a',
'readyish')
'readyish',
resource_attribute='special_status')
def test__resource_reaches_status_bespoke_fail(self):
resource_mock = mock.MagicMock()
@@ -501,7 +521,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
glance_mock.images.upload.assert_called_once_with(
'9d1125af',
f(),
)
backend=None)
self.resource_reaches_status.assert_called_once_with(
glance_mock.images,
'9d1125af',
@@ -526,7 +546,12 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.upload_image_to_glance.assert_called_once_with(
glance_mock,
'wibbly/c.img',
'bob')
'bob',
backend=None,
disk_format='qcow2',
visibility='public',
container_format='bare',
force_import=False)
def test_create_image_pass_directory(self):
glance_mock = mock.MagicMock()
@@ -546,7 +571,12 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.upload_image_to_glance.assert_called_once_with(
glance_mock,
'tests/c.img',
'bob')
'bob',
backend=None,
disk_format='qcow2',
visibility='public',
container_format='bare',
force_import=False)
self.gettempdir.assert_not_called()
def test_create_ssh_key(self):
@@ -578,21 +608,27 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
nova_mock.keypairs.create.assert_called_once_with(name='mykeys')
def test_get_private_key_file(self):
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir',
return_value='/tmp/zaza-model1')
self.assertEqual(
openstack_utils.get_private_key_file('mykeys'),
'tests/id_rsa_mykeys')
'/tmp/zaza-model1/id_rsa_mykeys')
def test_write_private_key(self):
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir',
return_value='/tmp/zaza-model1')
m = mock.mock_open()
with mock.patch(
'zaza.openstack.utilities.openstack.open', m, create=False
):
openstack_utils.write_private_key('mykeys', 'keycontents')
m.assert_called_once_with('tests/id_rsa_mykeys', 'w')
m.assert_called_once_with('/tmp/zaza-model1/id_rsa_mykeys', 'w')
handle = m()
handle.write.assert_called_once_with('keycontents')
def test_get_private_key(self):
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir',
return_value='/tmp/zaza-model1')
self.patch_object(openstack_utils.os.path, "isfile",
return_value=True)
m = mock.mock_open(read_data='myprivkey')
@@ -604,6 +640,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
'myprivkey')
def test_get_private_key_file_missing(self):
self.patch_object(openstack_utils.deployment_env, 'get_tmpdir',
return_value='/tmp/zaza-model1')
self.patch_object(openstack_utils.os.path, "isfile",
return_value=False)
self.assertIsNone(openstack_utils.get_private_key('mykeys'))
@@ -664,17 +702,19 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
[])
def test_ping_response(self):
self.patch_object(openstack_utils.subprocess, 'check_call')
self.patch_object(openstack_utils.subprocess, 'run')
openstack_utils.ping_response('10.0.0.10')
self.check_call.assert_called_once_with(
['ping', '-c', '1', '-W', '1', '10.0.0.10'], stdout=-3)
self.run.assert_called_once_with(
['ping', '-c', '1', '-W', '1', '10.0.0.10'], check=True,
stdout=mock.ANY, stderr=mock.ANY)
def test_ping_response_fail(self):
openstack_utils.ping_response.retry.wait = \
tenacity.wait_none()
self.patch_object(openstack_utils.subprocess, 'check_call')
self.check_call.side_effect = Exception()
with self.assertRaises(Exception):
self.patch_object(openstack_utils.subprocess, 'run')
self.run.side_effect = subprocess.CalledProcessError(returncode=42,
cmd='mycmd')
with self.assertRaises(subprocess.CalledProcessError):
openstack_utils.ping_response('10.0.0.10')
def test_ssh_test(self):
@@ -735,7 +775,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
'bob',
'10.0.0.10',
'myvm',
password='reallyhardpassord')
password='reallyhardpassord',
retry=False)
paramiko_mock.connect.assert_called_once_with(
'10.0.0.10',
password='reallyhardpassord',
@@ -759,7 +800,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
privkey='myprivkey')
paramiko_mock.connect.assert_called_once_with(
'10.0.0.10',
password='',
password=None,
pkey='akey',
username='bob')
@@ -803,23 +844,26 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
name='_get_os_version'
)
self.patch(
'zaza.openstack.utilities.juju.get_machines_for_application',
'zaza.utilities.juju.get_machines_for_application',
new_callable=mock.MagicMock(),
name='_get_machines'
)
self.patch(
'zaza.openstack.utilities.juju.get_machine_series',
'zaza.utilities.juju.get_machine_series',
new_callable=mock.MagicMock(),
name='_get_machine_series'
)
_machine = mock.MagicMock()
# No machine returned
self._get_machines.return_value = []
with self.assertRaises(exceptions.ApplicationNotFound):
openstack_utils.get_current_os_release_pair()
self._get_machines.side_effect = None
# No series returned
self._get_machines.return_value = ['6']
self._get_machines.return_value = [_machine]
self._get_machine_series.return_value = None
with self.assertRaises(exceptions.SeriesNotFound):
openstack_utils.get_current_os_release_pair()
@@ -842,7 +886,24 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
result = openstack_utils.get_current_os_release_pair()
self.assertEqual(expected, result)
def test_get_openstack_release(self):
def test_get_current_os_versions(self):
    """get_current_os_versions prefers openstack-release, else pkg map."""
    self.patch_object(openstack_utils, "get_openstack_release")
    self.patch_object(openstack_utils.generic_utils, "get_pkg_version")
    # Pre-Wallaby scenario where openstack-release package isn't installed
    self.get_openstack_release.return_value = None
    self.get_pkg_version.return_value = '18.0.0'
    # keystone 18.x maps to the victoria release series.
    expected = {'keystone': 'victoria'}
    result = openstack_utils.get_current_os_versions('keystone')
    self.assertEqual(expected, result)
    # Wallaby+ scenario where openstack-release package is installed
    self.get_openstack_release.return_value = 'wallaby'
    expected = {'keystone': 'wallaby'}
    result = openstack_utils.get_current_os_versions('keystone')
    self.assertEqual(expected, result)
def test_get_os_release(self):
self.patch(
'zaza.openstack.utilities.openstack.get_current_os_release_pair',
new_callable=mock.MagicMock(),
@@ -871,6 +932,14 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
release_comp = xenial_queens > xenial_mitaka
self.assertTrue(release_comp)
# Check specifying an application
self._get_os_rel_pair.reset_mock()
self._get_os_rel_pair.return_value = 'xenial_mitaka'
expected = 4
result = openstack_utils.get_os_release(application='myapp')
self.assertEqual(expected, result)
self._get_os_rel_pair.assert_called_once_with(application='myapp')
def test_get_keystone_api_version(self):
self.patch_object(openstack_utils, "get_current_os_versions")
self.patch_object(openstack_utils, "get_application_config_option")
@@ -886,6 +955,23 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.get_application_config_option.return_value = None
self.assertEqual(openstack_utils.get_keystone_api_version(), 3)
def test_get_openstack_release(self):
    """get_openstack_release parses OPENSTACK_CODENAME from a unit."""
    self.patch_object(openstack_utils.model, "get_units")
    self.patch_object(openstack_utils.juju_utils, "remote_run")
    # Test pre-Wallaby behavior where openstack-release pkg isn't installed
    # NOTE(review): this pre-Wallaby setup is overwritten below without
    # get_openstack_release ever being called against it -- the empty-units
    # branch is not actually exercised; confirm and add an assertion.
    self.get_units.return_value = []
    self.remote_run.return_value = "OPENSTACK_CODENAME=wallaby "
    # Test Wallaby+ behavior where openstack-release package is installed
    unit1 = mock.MagicMock()
    unit1.entity_id = 1
    self.get_units.return_value = [unit1]
    self.remote_run.return_value = "OPENSTACK_CODENAME=wallaby "
    result = openstack_utils.get_openstack_release("application", "model")
    self.assertEqual(result, "wallaby")
def test_get_project_id(self):
# No domain
self.patch_object(openstack_utils, "get_keystone_api_version")
@@ -1106,7 +1192,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.get_relation_from_unit.assert_called_once_with(
'swift-proxy',
'keystone',
'identity-service')
'identity-service',
model_name=None)
self.get_keystone_session.assert_called_once_with(
{
'OS_AUTH_URL': 'http://10.5.0.61:5000/v3',
@@ -1153,7 +1240,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
self.get_relation_from_unit.assert_called_once_with(
'swift-proxy',
'keystone',
'identity-service')
'identity-service',
model_name=None)
self.get_keystone_session.assert_called_once_with(
{
'OS_AUTH_URL': 'http://10.5.0.36:5000/v2.0',
@@ -1165,3 +1253,280 @@ class TestOpenStackUtils(ut_utils.BaseTestCase):
'OS_PROJECT_NAME': 'services'},
scope='PROJECT',
verify=None)
def test_get_gateway_uuids(self):
    """get_gateway_uuids queries machine uuids for neutron-gateway."""
    self.patch_object(openstack_utils.juju_utils,
                      'get_machine_uuids_for_application')
    self.get_machine_uuids_for_application.return_value = 'ret'
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(openstack_utils.get_gateway_uuids(), 'ret')
    self.get_machine_uuids_for_application.assert_called_once_with(
        'neutron-gateway')
def test_get_ovs_uuids(self):
    """get_ovs_uuids queries machine uuids for neutron-openvswitch."""
    self.patch_object(openstack_utils.juju_utils,
                      'get_machine_uuids_for_application')
    self.get_machine_uuids_for_application.return_value = 'ret'
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(openstack_utils.get_ovs_uuids(), 'ret')
    self.get_machine_uuids_for_application.assert_called_once_with(
        'neutron-openvswitch')
def test_get_ovn_uuids(self):
    """get_ovn_uuids chains chassis and dedicated-chassis machine uuids."""
    self.patch_object(openstack_utils.juju_utils,
                      'get_machine_uuids_for_application')
    self.get_machine_uuids_for_application.return_value = ['ret']
    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(list(openstack_utils.get_ovn_uuids()),
                     ['ret', 'ret'])
    self.get_machine_uuids_for_application.assert_has_calls([
        mock.call('ovn-chassis'),
        mock.call('ovn-dedicated-chassis'),
    ])
def test_dvr_enabled(self):
    """dvr_enabled reads the neutron-api enable-dvr config option."""
    self.patch_object(openstack_utils, 'get_application_config_option')
    openstack_utils.dvr_enabled()
    self.get_application_config_option.assert_called_once_with(
        'neutron-api', 'enable-dvr')
def test_ovn_present(self):
    """ovn_present is true when either OVN chassis charm is deployed."""
    self.patch_object(openstack_utils.model, 'get_application')
    scenarios = (
        # (get_application side effects, expected presence)
        ([None, KeyError], True),       # ovn-chassis found
        ([KeyError, None], True),       # only ovn-dedicated-chassis found
        ([KeyError, KeyError], False),  # neither application present
    )
    for side_effects, present in scenarios:
        self.get_application.side_effect = side_effects
        result = openstack_utils.ovn_present()
        if present:
            self.assertTrue(result)
        else:
            self.assertFalse(result)
def test_ngw_present(self):
    """ngw_present reflects whether neutron-gateway is deployed."""
    self.patch_object(openstack_utils.model, 'get_application')
    # side_effect = None is the MagicMock default: get_application just
    # returns a mock, i.e. the application exists.
    self.get_application.side_effect = None
    self.assertTrue(openstack_utils.ngw_present())
    # KeyError from get_application means the application is absent.
    self.get_application.side_effect = KeyError
    self.assertFalse(openstack_utils.ngw_present())
def test_get_charm_networking_data(self):
    """Exercise topology detection: none, OVS, DVR, DVR+SNAT and OVN.

    Each scenario flips the feature-detection mocks and checks the
    resulting CharmedOpenStackNetworkingData tuple.
    """
    self.patch_object(openstack_utils, 'deprecated_external_networking')
    self.patch_object(openstack_utils, 'dvr_enabled')
    self.patch_object(openstack_utils, 'ovn_present')
    self.patch_object(openstack_utils, 'ngw_present')
    self.patch_object(openstack_utils, 'get_ovs_uuids')
    self.patch_object(openstack_utils, 'get_gateway_uuids')
    self.patch_object(openstack_utils, 'get_ovn_uuids')
    self.patch_object(openstack_utils.model, 'get_application')
    self.dvr_enabled.return_value = False
    self.ovn_present.return_value = False
    self.ngw_present.return_value = False
    self.get_ovs_uuids.return_value = []
    self.get_gateway_uuids.return_value = []
    self.get_ovn_uuids.return_value = []
    self.get_application.side_effect = KeyError
    # No networking charm detectable at all -> hard failure.
    with self.assertRaises(RuntimeError):
        openstack_utils.get_charm_networking_data()
    # assertEqual throughout: assertEquals is a deprecated alias
    # (removed in Py3.12).
    self.ngw_present.return_value = True
    self.assertEqual(
        openstack_utils.get_charm_networking_data(),
        openstack_utils.CharmedOpenStackNetworkingData(
            openstack_utils.OpenStackNetworkingTopology.ML2_OVS,
            ['neutron-gateway'],
            mock.ANY,
            'data-port',
            {}))
    self.dvr_enabled.return_value = True
    self.assertEqual(
        openstack_utils.get_charm_networking_data(),
        openstack_utils.CharmedOpenStackNetworkingData(
            openstack_utils.OpenStackNetworkingTopology.ML2_OVS_DVR,
            ['neutron-gateway', 'neutron-openvswitch'],
            mock.ANY,
            'data-port',
            {}))
    self.ngw_present.return_value = False
    self.assertEqual(
        openstack_utils.get_charm_networking_data(),
        openstack_utils.CharmedOpenStackNetworkingData(
            openstack_utils.OpenStackNetworkingTopology.ML2_OVS_DVR_SNAT,
            ['neutron-openvswitch'],
            mock.ANY,
            'data-port',
            {}))
    self.dvr_enabled.return_value = False
    self.ovn_present.return_value = True
    self.assertEqual(
        openstack_utils.get_charm_networking_data(),
        openstack_utils.CharmedOpenStackNetworkingData(
            openstack_utils.OpenStackNetworkingTopology.ML2_OVN,
            ['ovn-chassis'],
            mock.ANY,
            'bridge-interface-mappings',
            {'ovn-bridge-mappings': 'physnet1:br-ex'}))
    # When ovn-dedicated-chassis is also deployed it joins the unit list.
    self.get_application.side_effect = None
    self.assertEqual(
        openstack_utils.get_charm_networking_data(),
        openstack_utils.CharmedOpenStackNetworkingData(
            openstack_utils.OpenStackNetworkingTopology.ML2_OVN,
            ['ovn-chassis', 'ovn-dedicated-chassis'],
            mock.ANY,
            'bridge-interface-mappings',
            {'ovn-bridge-mappings': 'physnet1:br-ex'}))
def test_get_cacert_absolute_path(self):
    """The CA cert path is rooted at the deployment tmpdir."""
    self.patch_object(openstack_utils.deployment_env, 'get_tmpdir')
    self.get_tmpdir.return_value = '/tmp/default'
    expected = '/tmp/default/filename'
    self.assertEqual(
        expected,
        openstack_utils.get_cacert_absolute_path('filename'))
def test_get_cacert(self):
    """get_cacert prefers the vault CA file, falls back to keystone's,
    and returns None when neither exists on disk."""
    self.patch_object(openstack_utils.deployment_env, 'get_tmpdir')
    self.get_tmpdir.return_value = '/tmp/default'
    self.patch_object(openstack_utils.os.path, 'exists')
    results = {
        '/tmp/default/vault_juju_ca_cert.crt': True}
    # NOTE: the lambda closes over the *name* 'results', so rebinding
    # the dict below changes what later exists() calls observe.
    self.exists.side_effect = lambda x: results[x]
    self.assertEqual(
        openstack_utils.get_cacert(),
        '/tmp/default/vault_juju_ca_cert.crt')
    results = {
        '/tmp/default/vault_juju_ca_cert.crt': False,
        '/tmp/default/keystone_juju_ca_cert.crt': True}
    self.assertEqual(
        openstack_utils.get_cacert(),
        '/tmp/default/keystone_juju_ca_cert.crt')
    results = {
        '/tmp/default/vault_juju_ca_cert.crt': False,
        '/tmp/default/keystone_juju_ca_cert.crt': False}
    self.assertIsNone(openstack_utils.get_cacert())
def test_get_remote_ca_cert_file(self):
    """The first existing remote CA candidate is scp'd to a tempfile,
    moved into the deployment tmpdir and made world-readable."""
    self.patch_object(openstack_utils.model, 'get_first_unit_name')
    self.patch_object(
        openstack_utils,
        '_get_remote_ca_cert_file_candidates')
    self.patch_object(openstack_utils.model, 'scp_from_unit')
    self.patch_object(openstack_utils.os.path, 'exists')
    self.patch_object(openstack_utils.shutil, 'move')
    self.patch_object(openstack_utils.os, 'chmod')
    self.patch_object(openstack_utils.tempfile, 'NamedTemporaryFile')
    self.patch_object(openstack_utils.deployment_env, 'get_tmpdir')
    self.get_tmpdir.return_value = '/tmp/default'
    # Fake the `with NamedTemporaryFile(...) as f:` context manager so
    # f.name is predictable.
    enter_mock = mock.MagicMock()
    enter_mock.__enter__.return_value.name = 'tempfilename'
    self.NamedTemporaryFile.return_value = enter_mock
    self.get_first_unit_name.return_value = 'neutron-api/0'
    self._get_remote_ca_cert_file_candidates.return_value = [
        '/tmp/ca1.cert']
    self.exists.return_value = True
    openstack_utils.get_remote_ca_cert_file('neutron-api')
    self.scp_from_unit.assert_called_once_with(
        'neutron-api/0',
        '/tmp/ca1.cert',
        'tempfilename')
    # 0o644 so later test runs (possibly by other users) can read it.
    self.chmod.assert_called_once_with('/tmp/default/ca1.cert', 0o644)
    self.move.assert_called_once_with(
        'tempfilename', '/tmp/default/ca1.cert')
class TestAsyncOpenstackUtils(ut_utils.AioTestCase):
    """Async tests for the openstack_utils CA-certificate helpers."""

    def setUp(self):
        """Patch zaza.model so run_in_model yields a mocked model."""
        super(TestAsyncOpenstackUtils, self).setUp()
        if sys.version_info < (3, 6, 0):
            raise unittest.SkipTest("Can't AsyncMock in py35")
        model_mock = mock.MagicMock()
        test_mock = mock.MagicMock()

        # NOTE(review): subclassing a MagicMock *instance* grafts async
        # context-manager behaviour onto a mock class — looks deliberate,
        # confirm it still works on newer mock releases.
        class AsyncContextManagerMock(test_mock):
            async def __aenter__(self):
                return self

            async def __aexit__(self, *args):
                pass
        self.model_mock = model_mock
        self.patch_object(openstack_utils.zaza.model, "async_block_until")

        async def _block_until(f, timeout):
            # Store the result of the call to _check_ca_present to validate
            # tests
            self.result = await f()
        self.async_block_until.side_effect = _block_until
        self.patch('zaza.model.run_in_model', name='_run_in_model')
        self._run_in_model.return_value = AsyncContextManagerMock
        self._run_in_model().__aenter__.return_value = self.model_mock

    async def test_async_block_until_ca_exists(self):
        """The CA blocker succeeds only when a unit run finds the cert."""
        def _get_action_output(stdout, code, stderr=None):
            # Build a minimal juju action-result payload.
            stderr = stderr or ''
            action = mock.MagicMock()
            action.data = {
                'results': {
                    'Code': code,
                    'Stderr': stderr,
                    'Stdout': stdout}}
            return action
        results = {
            '/tmp/missing.cert': _get_action_output(
                '',
                '1',
                'cat: /tmp/missing.cert: No such file or directory'),
            '/tmp/good.cert': _get_action_output('CERTIFICATE', '0')}

        async def _run(command, timeout=None):
            # Keyed on the last token of the command, i.e. the file path.
            return results[command.split()[-1]]
        self.unit1 = mock.MagicMock()
        self.unit2 = mock.MagicMock()
        self.unit2.run.side_effect = _run
        self.unit1.run.side_effect = _run
        self.units = [self.unit1, self.unit2]
        _units = mock.MagicMock()
        _units.units = self.units
        self.model_mock.applications = {
            'keystone': _units
        }
        self.patch_object(
            openstack_utils,
            "_async_get_remote_ca_cert_file_candidates")
        # Test a missing cert then a good cert.
        self._async_get_remote_ca_cert_file_candidates.return_value = [
            '/tmp/missing.cert',
            '/tmp/good.cert']
        await openstack_utils.async_block_until_ca_exists(
            'keystone',
            'CERTIFICATE')
        self.assertTrue(self.result)
        # Test a single missing
        self._async_get_remote_ca_cert_file_candidates.return_value = [
            '/tmp/missing.cert']
        await openstack_utils.async_block_until_ca_exists(
            'keystone',
            'CERTIFICATE')
        self.assertFalse(self.result)

    async def test__async_get_remote_ca_cert_file_candidates(self):
        """Candidate CA files depend on whether a vault relation exists."""
        self.patch_object(openstack_utils.zaza.model, "async_get_relation_id")
        rel_id_out = {
        }

        def _get_relation_id(app, cert_app, model_name, remote_interface_name):
            return rel_id_out[cert_app]
        self.async_get_relation_id.side_effect = _get_relation_id
        # With a vault relation the vault CA is tried first.
        rel_id_out['vault'] = 'certs:1'
        r = await openstack_utils._async_get_remote_ca_cert_file_candidates(
            'neutron-api', 'mymodel')
        self.assertEqual(
            r,
            ['/usr/local/share/ca-certificates/vault_juju_ca_cert.crt',
             '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'])
        # Without vault only the keystone CA is a candidate.
        rel_id_out['vault'] = None
        r = await openstack_utils._async_get_remote_ca_cert_file_candidates(
            'neutron-api', 'mymodel')
        self.assertEqual(
            r,
            ['/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'])

View File

@@ -0,0 +1,288 @@
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unit_tests.utils as ut_utils
import zaza.openstack.utilities.openstack_upgrade as openstack_upgrade
class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase):
    """Tests for zaza.openstack.utilities.openstack_upgrade."""

    async def _arun_action_on_units(self, units, cmd, model_name=None,
                                    raise_on_failure=True):
        # Awaitable no-op standing in for model.async_run_action_on_units.
        pass

    def setUp(self):
        """Patch the zaza model layer and seed a canned juju status."""
        super(TestOpenStackUpgradeUtils, self).setUp()
        self.patch_object(
            openstack_upgrade.zaza.model,
            "async_run_action_on_units")
        self.async_run_action_on_units.side_effect = self._arun_action_on_units
        self.patch_object(
            openstack_upgrade.zaza.model,
            "get_units")
        self.juju_status = mock.MagicMock()
        self.patch_object(
            openstack_upgrade.zaza.model,
            "get_status",
            return_value=self.juju_status)
        self.patch_object(
            openstack_upgrade.zaza.model,
            "set_application_config")
        self.patch_object(
            openstack_upgrade.zaza.model,
            "get_application_config")
        self.patch_object(
            openstack_upgrade.zaza.model,
            "block_until_all_units_idle")
        self.patch_object(
            openstack_upgrade,
            "block_until_mysql_innodb_cluster_has_rw")

        def _get_application_config(app, model_name=None):
            # Canned charm config: 'old-src' marks apps not yet upgraded.
            app_config = {
                'ceph-mon': {'verbose': {'value': True},
                             'source': {'value': 'old-src'}},
                'neutron-openvswitch': {'verbose': {'value': True}},
                'ntp': {'verbose': {'value': True}},
                'percona-cluster': {'verbose': {'value': True},
                                    'source': {'value': 'old-src'}},
                'cinder': {
                    'verbose': {'value': True},
                    'openstack-origin': {'value': 'old-src'},
                    'action-managed-upgrade': {'value': False}},
                'neutron-api': {
                    'verbose': {'value': True},
                    'openstack-origin': {'value': 'old-src'},
                    'action-managed-upgrade': {'value': False}},
                'nova-compute': {
                    'verbose': {'value': True},
                    'openstack-origin': {'value': 'old-src'},
                    'action-managed-upgrade': {'value': False}},
                'mysql-innodb-cluster': {
                    'verbose': {'value': True},
                    'source': {'value': 'old-src'},
                    'action-managed-upgrade': {'value': True}},
            }
            return app_config[app]
        self.get_application_config.side_effect = _get_application_config
        self.juju_status.applications = {
            'mydb': {  # Filter as it is on UPGRADE_EXCLUDE_LIST
                'charm': 'cs:percona-cluster'},
            'neutron-openvswitch': {  # Filter as it is a subordinates
                'charm': 'cs:neutron-openvswitch',
                'subordinate-to': 'nova-compute'},
            'ntp': {  # Filter as it has no source option
                'charm': 'cs:ntp'},
            'mysql-innodb-cluster': {
                'charm': 'cs:mysql-innodb-cluster',
                'units': {
                    'mysql-innodb-cluster/0': {}}},
            'nova-compute': {
                'charm': 'cs:nova-compute',
                'units': {
                    'nova-compute/0': {
                        'subordinates': {
                            'neutron-openvswitch/2': {
                                'charm': 'cs:neutron-openvswitch-22'}}}}},
            'cinder': {
                'charm': 'cs:cinder-23',
                'units': {
                    'cinder/1': {
                        'subordinates': {
                            'cinder-hacluster/0': {
                                'charm': 'cs:hacluster-42'},
                            'cinder-ceph/3': {
                                'charm': 'cs:cinder-ceph-2'}}}}}}

    def test_pause_units(self):
        """pause_units dispatches the 'pause' action to the given units."""
        openstack_upgrade.pause_units(['cinder/1', 'glance/2'])
        self.async_run_action_on_units.assert_called_once_with(
            ['cinder/1', 'glance/2'],
            'pause',
            model_name=None,
            raise_on_failure=True)

    def test_resume_units(self):
        """resume_units dispatches the 'resume' action to the given units."""
        openstack_upgrade.resume_units(['cinder/1', 'glance/2'])
        self.async_run_action_on_units.assert_called_once_with(
            ['cinder/1', 'glance/2'],
            'resume',
            model_name=None,
            raise_on_failure=True)

    def test_action_unit_upgrade(self):
        """action_unit_upgrade dispatches 'openstack-upgrade' to units."""
        openstack_upgrade.action_unit_upgrade(['cinder/1', 'glance/2'])
        self.async_run_action_on_units.assert_called_once_with(
            ['cinder/1', 'glance/2'],
            'openstack-upgrade',
            model_name=None,
            raise_on_failure=True)

    def test_action_upgrade_apps(self):
        """Subordinate hacluster units are paused first and resumed last."""
        self.patch_object(openstack_upgrade, "pause_units")
        self.patch_object(openstack_upgrade, "action_unit_upgrade")
        self.patch_object(openstack_upgrade, "resume_units")
        mock_nova_compute_0 = mock.MagicMock()
        mock_nova_compute_0.entity_id = 'nova-compute/0'
        mock_cinder_1 = mock.MagicMock()
        mock_cinder_1.entity_id = 'cinder/1'
        units = {
            'nova-compute': [mock_nova_compute_0],
            'cinder': [mock_cinder_1]}
        self.get_units.side_effect = lambda app, model_name: units[app]
        openstack_upgrade.action_upgrade_apps(['nova-compute', 'cinder'])
        pause_calls = [
            mock.call(['cinder-hacluster/0'], model_name=None),
            mock.call(['nova-compute/0', 'cinder/1'], model_name=None)]
        self.pause_units.assert_has_calls(pause_calls, any_order=False)
        action_unit_upgrade_calls = [
            mock.call(['nova-compute/0', 'cinder/1'], model_name=None)]
        self.action_unit_upgrade.assert_has_calls(
            action_unit_upgrade_calls,
            any_order=False)
        resume_calls = [
            mock.call(['nova-compute/0', 'cinder/1'], model_name=None),
            mock.call(['cinder-hacluster/0'], model_name=None)]
        self.resume_units.assert_has_calls(resume_calls, any_order=False)

    def test_action_upgrade_apps_mysql_innodb_cluster(self):
        """Verify that mysql-innodb-cluster is settled before complete."""
        self.patch_object(openstack_upgrade, "pause_units")
        self.patch_object(openstack_upgrade, "action_unit_upgrade")
        self.patch_object(openstack_upgrade, "resume_units")
        mock_mysql_innodb_cluster_0 = mock.MagicMock()
        mock_mysql_innodb_cluster_0.entity_id = 'mysql-innodb-cluster/0'
        units = {'mysql-innodb-cluster': [mock_mysql_innodb_cluster_0]}
        self.get_units.side_effect = lambda app, model_name: units[app]
        openstack_upgrade.action_upgrade_apps(['mysql-innodb-cluster'])
        pause_calls = [
            mock.call(['mysql-innodb-cluster/0'], model_name=None)]
        self.pause_units.assert_has_calls(pause_calls, any_order=False)
        action_unit_upgrade_calls = [
            mock.call(['mysql-innodb-cluster/0'], model_name=None)]
        self.action_unit_upgrade.assert_has_calls(
            action_unit_upgrade_calls,
            any_order=False)
        resume_calls = [
            mock.call(['mysql-innodb-cluster/0'], model_name=None)]
        self.resume_units.assert_has_calls(resume_calls, any_order=False)
        self.block_until_mysql_innodb_cluster_has_rw.assert_called_once_with(
            None)

    def test_set_upgrade_application_config(self):
        """Origin and action-managed-upgrade are set per application."""
        openstack_upgrade.set_upgrade_application_config(
            ['neutron-api', 'cinder'],
            'new-src')
        set_app_calls = [
            mock.call(
                'neutron-api',
                {
                    'openstack-origin': 'new-src',
                    'action-managed-upgrade': 'True'},
                model_name=None),
            mock.call(
                'cinder',
                {
                    'openstack-origin': 'new-src',
                    'action-managed-upgrade': 'True'},
                model_name=None)]
        self.set_application_config.assert_has_calls(set_app_calls)
        self.set_application_config.reset_mock()
        # Non-action-managed charms only get their 'source' bumped.
        openstack_upgrade.set_upgrade_application_config(
            ['percona-cluster'],
            'new-src',
            action_managed=False)
        self.set_application_config.assert_called_once_with(
            'percona-cluster',
            {'source': 'new-src'},
            model_name=None)

    def test_is_action_upgradable(self):
        """Charms exposing openstack-origin support action upgrades."""
        self.assertTrue(
            openstack_upgrade.is_action_upgradable('cinder'))
        self.assertFalse(
            openstack_upgrade.is_action_upgradable('percona-cluster'))

    def test_is_already_upgraded(self):
        """An app whose origin already matches the target is upgraded."""
        self.assertTrue(
            openstack_upgrade.is_already_upgraded('cinder', 'old-src'))
        self.assertFalse(
            openstack_upgrade.is_already_upgraded('cinder', 'new-src'))

    def test_run_action_upgrade(self):
        """run_action_upgrades sets config then runs the upgrade actions."""
        self.patch_object(openstack_upgrade, "set_upgrade_application_config")
        self.patch_object(openstack_upgrade, "action_upgrade_apps")
        openstack_upgrade.run_action_upgrades(
            ['cinder', 'neutron-api'],
            'new-src')
        self.set_upgrade_application_config.assert_called_once_with(
            ['cinder', 'neutron-api'],
            'new-src',
            model_name=None)
        self.action_upgrade_apps.assert_called_once_with(
            ['cinder', 'neutron-api'],
            model_name=None)

    def test_run_all_in_one_upgrade(self):
        """All-in-one upgrades set config and wait for the model to idle."""
        self.patch_object(openstack_upgrade, "set_upgrade_application_config")
        self.patch_object(
            openstack_upgrade.zaza.model,
            'block_until_all_units_idle')
        openstack_upgrade.run_all_in_one_upgrades(
            ['percona-cluster'],
            'new-src')
        self.set_upgrade_application_config.assert_called_once_with(
            ['percona-cluster'],
            'new-src',
            action_managed=False,
            model_name=None)
        self.block_until_all_units_idle.assert_called_once_with()

    def test_run_upgrade(self):
        """Apps are partitioned into all-in-one vs action-managed paths."""
        self.patch_object(openstack_upgrade, "run_all_in_one_upgrades")
        self.patch_object(openstack_upgrade, "run_action_upgrades")
        openstack_upgrade.run_upgrade_on_apps(
            ['cinder', 'neutron-api', 'ceph-mon'],
            'new-src')
        self.run_all_in_one_upgrades.assert_called_once_with(
            ['ceph-mon'],
            'new-src',
            model_name=None)
        self.run_action_upgrades.assert_called_once_with(
            ['cinder', 'neutron-api'],
            'new-src',
            model_name=None)

    def test_run_upgrade_tests(self):
        """Upgrade groups are processed strictly in the returned order."""
        self.patch_object(openstack_upgrade, "run_upgrade_on_apps")
        self.patch_object(openstack_upgrade, "get_upgrade_groups")
        self.get_upgrade_groups.return_value = [
            ('Compute', ['nova-compute']),
            ('Control Plane', ['cinder', 'neutron-api']),
            ('Core Identity', ['keystone']),
            ('Storage', ['ceph-mon']),
            ('sweep_up', ['designate'])]
        openstack_upgrade.run_upgrade_tests('new-src', model_name=None)
        run_upgrade_calls = [
            mock.call(['nova-compute'], 'new-src', model_name=None),
            mock.call(['cinder', 'neutron-api'], 'new-src', model_name=None),
            mock.call(['keystone'], 'new-src', model_name=None),
            mock.call(['ceph-mon'], 'new-src', model_name=None),
            mock.call(['designate'], 'new-src', model_name=None),
        ]
        self.run_upgrade_on_apps.assert_has_calls(
            run_upgrade_calls, any_order=False)

View File

@@ -0,0 +1,586 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import sys
import unittest
import unit_tests.utils as ut_utils
import zaza.openstack.utilities.generic as generic_utils
import zaza.openstack.utilities.series_upgrade as series_upgrade
import zaza.openstack.utilities.parallel_series_upgrade as upgrade_utils
# Minimal juju application-status fragment: a three-unit app ('app') with
# an hacluster subordinate on each unit; unit app/0 is the leader.
FAKE_STATUS = {
    'can-upgrade-to': '',
    'charm': 'local:trusty/app-136',
    'subordinate-to': [],
    'units': {'app/0': {'leader': True,
                        'machine': '0',
                        'subordinates': {
                            'app-hacluster/0': {
                                'charm': 'local:trusty/hacluster-0',
                                'leader': True}}},
              'app/1': {'machine': '1',
                        'subordinates': {
                            'app-hacluster/1': {
                                'charm': 'local:trusty/hacluster-0'}}},
              'app/2': {'machine': '2',
                        'subordinates': {
                            'app-hacluster/2': {
                                'charm': 'local:trusty/hacluster-0'}}}}}

# Same shape for mongodb: three units, no subordinates; mongo/0 leads.
FAKE_STATUS_MONGO = {
    'can-upgrade-to': '',
    'charm': 'local:trusty/mongodb-10',
    'subordinate-to': [],
    'units': {'mongo/0': {'leader': True,
                          'machine': '0',
                          'subordinates': {}},
              'mongo/1': {'machine': '1',
                          'subordinates': {}},
              'mongo/2': {'machine': '2',
                          'subordinates': {}}}}
class Test_ParallelSeriesUpgradeSync(ut_utils.BaseTestCase):
    """Synchronous tests for parallel_series_upgrade helpers."""

    def setUp(self):
        super(Test_ParallelSeriesUpgradeSync, self).setUp()
        # Juju Status Object and data
        # self.juju_status = mock.MagicMock()
        # self.juju_status.applications.__getitem__.return_value = FAKE_STATUS
        # self.patch_object(upgrade_utils, "model")
        # self.model.get_status.return_value = self.juju_status

    def test_get_leader_and_non_leaders(self):
        """Units split into a (leaders, non-leaders) pair of dicts."""
        expected = ({
            'app/0': {
                'leader': True,
                'machine': '0',
                'subordinates': {
                    'app-hacluster/0': {
                        'charm': 'local:trusty/hacluster-0',
                        'leader': True}}}}, {
            'app/1': {
                'machine': '1',
                'subordinates': {
                    'app-hacluster/1': {
                        'charm': 'local:trusty/hacluster-0'}}},
            'app/2': {
                'machine': '2',
                'subordinates': {
                    'app-hacluster/2': {
                        'charm': 'local:trusty/hacluster-0'}}}})
        self.assertEqual(
            expected,
            upgrade_utils.get_leader_and_non_leaders(FAKE_STATUS)
        )

    def test_app_config_openstack_charm(self):
        """Default profile: openstack-origin, pause everything, leader first."""
        expected = {
            'origin': 'openstack-origin',
            'pause_non_leader_subordinate': True,
            'pause_non_leader_primary': True,
            'post_upgrade_functions': [],
            'pre_upgrade_functions': [],
            'post_application_upgrade_functions': [],
            'follower_first': False, }
        config = upgrade_utils.app_config('keystone')
        self.assertEqual(expected, config)

    def test_app_config_mongo(self):
        """mongodb has no origin option and upgrades followers first."""
        expected = {
            'origin': None,
            'pause_non_leader_subordinate': True,
            'pause_non_leader_primary': True,
            'post_upgrade_functions': [],
            'pre_upgrade_functions': [],
            'post_application_upgrade_functions': [],
            'follower_first': True, }
        config = upgrade_utils.app_config('mongodb')
        self.assertEqual(expected, config)

    def test_app_config_ceph(self):
        """ceph-mon uses 'source' and is never paused during upgrade."""
        expected = {
            'origin': 'source',
            'pause_non_leader_subordinate': False,
            'pause_non_leader_primary': False,
            'post_upgrade_functions': [],
            'pre_upgrade_functions': [],
            'post_application_upgrade_functions': [],
            'follower_first': False, }
        config = upgrade_utils.app_config('ceph-mon')
        self.assertEqual(expected, config)

    def test_app_config_percona(self):
        """percona-cluster needs a post-application completion hook."""
        expected = {
            'origin': 'source',
            'pause_non_leader_subordinate': True,
            'pause_non_leader_primary': True,
            'post_upgrade_functions': [],
            'pre_upgrade_functions': [],
            'post_application_upgrade_functions': [
                ('zaza.openstack.charm_tests.mysql.utils.'
                 'complete_cluster_series_upgrade')
            ],
            'follower_first': False, }
        config = upgrade_utils.app_config('percona-cluster')
        self.assertEqual(expected, config)
class TestParallelSeriesUpgrade(ut_utils.AioTestCase):
def setUp(self):
    """Build an AsyncMock-backed juju model for the upgrade tests."""
    super(TestParallelSeriesUpgrade, self).setUp()
    if sys.version_info < (3, 6, 0):
        raise unittest.SkipTest("Can't AsyncMock in py35")
    self.patch_object(series_upgrade, "async_prepare_series_upgrade")
    self.patch_object(generic_utils, 'check_call')
    # Juju Status Object and data
    self.juju_status = mock.AsyncMock()
    self.juju_status.return_value.applications.__getitem__.return_value = \
        FAKE_STATUS
    self.patch_object(upgrade_utils, "model")
    self.model.async_get_status = self.juju_status
    self.async_run_action = mock.AsyncMock()
    self.model.async_run_action = self.async_run_action
    self.async_block_until = mock.AsyncMock()
    self.model.async_block_until = self.async_block_until
    self.model.async_wait_for_unit_idle = mock.AsyncMock()
    self.async_run_on_machine = mock.AsyncMock()
    self.model.async_run_on_machine = self.async_run_on_machine
    self.model.async_block_until_units_on_machine_are_idle = \
        mock.AsyncMock()
@mock.patch.object(upgrade_utils.cl_utils, 'get_class')
async def test_run_post_application_upgrade_functions(
    self,
    mock_get_class
):
    """Each dotted-path hook is resolved via get_class and awaited."""
    called = mock.AsyncMock()
    mock_get_class.return_value = called
    await upgrade_utils.run_post_application_upgrade_functions(
        ['my.thing'])
    mock_get_class.assert_called_once_with('my.thing')
    called.assert_called()
@mock.patch.object(upgrade_utils.cl_utils, 'get_class')
async def test_run_pre_upgrade_functions(self, mock_get_class):
    """Pre-upgrade hooks are resolved and called with the machine id."""
    called = mock.AsyncMock()
    mock_get_class.return_value = called
    await upgrade_utils.run_pre_upgrade_functions('1', ['my.thing'])
    mock_get_class.assert_called_once_with('my.thing')
    called.assert_called_once_with('1')
@mock.patch.object(upgrade_utils, 'run_post_application_upgrade_functions')
@mock.patch.object(
    upgrade_utils.series_upgrade_utils, 'async_prepare_series_upgrade')
@mock.patch.object(upgrade_utils.series_upgrade_utils, 'async_set_series')
@mock.patch.object(upgrade_utils, 'maybe_pause_things')
@mock.patch.object(upgrade_utils, 'series_upgrade_machine')
async def test_parallel_series_upgrade_mongo(
    self,
    mock_series_upgrade_machine,
    mock_maybe_pause_things,
    mock_async_set_series,
    mock_async_prepare_series_upgrade,
    mock_post_application_upgrade_functions,
):
    """Parallel upgrade of mongodb (follower_first, no origin option)."""
    self.juju_status.return_value.applications.__getitem__.return_value = \
        FAKE_STATUS_MONGO
    upgrade_config = upgrade_utils.app_config('mongodb')
    await upgrade_utils.parallel_series_upgrade(
        'mongodb',
        from_series='trusty',
        to_series='xenial',
        **upgrade_config
    )
    mock_async_set_series.assert_called_once_with(
        'mongodb', to_series='xenial')
    self.juju_status.assert_called()
    # The below is using `any_order=True` because the ordering is
    # undetermined and differs between python versions
    mock_async_prepare_series_upgrade.assert_has_calls([
        mock.call('1', to_series='xenial'),
        mock.call('2', to_series='xenial'),
        mock.call('0', to_series='xenial'),
    ], any_order=True)
    mock_maybe_pause_things.assert_called()
    # origin=None: mongodb has no openstack-origin/source option.
    mock_series_upgrade_machine.assert_has_calls([
        mock.call(
            '1',
            origin=None,
            application='mongodb',
            files=None,
            workaround_script=None,
            post_upgrade_functions=[]),
        mock.call(
            '2',
            origin=None,
            application='mongodb',
            files=None,
            workaround_script=None,
            post_upgrade_functions=[]),
        mock.call(
            '0',
            origin=None,
            application='mongodb',
            files=None,
            workaround_script=None,
            post_upgrade_functions=[]),
    ])
    mock_post_application_upgrade_functions.assert_called_once_with([])
@mock.patch.object(upgrade_utils, 'run_post_application_upgrade_functions')
@mock.patch.object(
    upgrade_utils.series_upgrade_utils, 'async_prepare_series_upgrade')
@mock.patch.object(upgrade_utils.series_upgrade_utils, 'async_set_series')
@mock.patch.object(upgrade_utils, 'maybe_pause_things')
@mock.patch.object(upgrade_utils, 'series_upgrade_machine')
async def test_serial_series_upgrade_mongo(
    self,
    mock_series_upgrade_machine,
    mock_maybe_pause_things,
    mock_async_set_series,
    mock_async_prepare_series_upgrade,
    mock_post_application_upgrade_functions,
):
    """Serial upgrade of mongodb: followers ('1', '2') before leader '0'."""
    self.juju_status.return_value.applications.__getitem__.return_value = \
        FAKE_STATUS_MONGO
    upgrade_config = upgrade_utils.app_config('mongodb')
    await upgrade_utils.serial_series_upgrade(
        'mongodb',
        from_series='trusty',
        to_series='xenial',
        **upgrade_config
    )
    mock_async_set_series.assert_called_once_with(
        'mongodb', to_series='xenial')
    self.juju_status.assert_called()
    mock_async_prepare_series_upgrade.assert_has_calls([
        mock.call('1', to_series='xenial'),
        mock.call('2', to_series='xenial'),
        mock.call('0', to_series='xenial'),
    ])
    mock_maybe_pause_things.assert_called()
    mock_series_upgrade_machine.assert_has_calls([
        mock.call(
            '1',
            origin=None,
            application='mongodb',
            files=None,
            workaround_script=None,
            post_upgrade_functions=[]),
        mock.call(
            '2',
            origin=None,
            application='mongodb',
            files=None,
            workaround_script=None,
            post_upgrade_functions=[]),
        mock.call(
            '0',
            origin=None,
            application='mongodb',
            files=None,
            workaround_script=None,
            post_upgrade_functions=[]),
    ])
    mock_post_application_upgrade_functions.assert_called_once_with([])
@mock.patch.object(upgrade_utils, 'run_post_application_upgrade_functions')
@mock.patch.object(
    upgrade_utils.series_upgrade_utils, 'async_prepare_series_upgrade')
@mock.patch.object(upgrade_utils.series_upgrade_utils, 'async_set_series')
@mock.patch.object(upgrade_utils, 'maybe_pause_things')
@mock.patch.object(upgrade_utils, 'series_upgrade_machine')
async def test_parallel_series_upgrade(
    self,
    mock_series_upgrade_machine,
    mock_maybe_pause_things,
    mock_async_set_series,
    mock_async_prepare_series_upgrade,
    mock_post_application_upgrade_functions,
):
    """Parallel upgrade of a default app using openstack-origin."""
    await upgrade_utils.parallel_series_upgrade(
        'app',
        from_series='trusty',
        to_series='xenial',
    )
    mock_async_set_series.assert_called_once_with(
        'app', to_series='xenial')
    self.juju_status.assert_called()
    # The below is using `any_order=True` because the ordering is
    # undetermined and differs between python versions
    mock_async_prepare_series_upgrade.assert_has_calls([
        mock.call('1', to_series='xenial'),
        mock.call('2', to_series='xenial'),
        mock.call('0', to_series='xenial'),
    ], any_order=True)
    mock_maybe_pause_things.assert_called()
    mock_series_upgrade_machine.assert_has_calls([
        mock.call(
            '1',
            origin='openstack-origin',
            application='app',
            files=None,
            workaround_script=None,
            post_upgrade_functions=None),
        mock.call(
            '2',
            origin='openstack-origin',
            application='app',
            files=None,
            workaround_script=None,
            post_upgrade_functions=None),
        mock.call(
            '0',
            origin='openstack-origin',
            application='app',
            files=None,
            workaround_script=None,
            post_upgrade_functions=None),
    ])
    mock_post_application_upgrade_functions.assert_called_once_with(None)
@mock.patch.object(upgrade_utils, 'run_post_application_upgrade_functions')
@mock.patch.object(
    upgrade_utils.series_upgrade_utils, 'async_prepare_series_upgrade')
@mock.patch.object(upgrade_utils.series_upgrade_utils, 'async_set_series')
@mock.patch.object(upgrade_utils, 'maybe_pause_things')
@mock.patch.object(upgrade_utils, 'series_upgrade_machine')
async def test_serial_series_upgrade(
    self,
    mock_series_upgrade_machine,
    mock_maybe_pause_things,
    mock_async_set_series,
    mock_async_prepare_series_upgrade,
    mock_post_application_upgrade_functions,
):
    """Serial upgrade of a default app: leader machine '0' goes first."""
    await upgrade_utils.serial_series_upgrade(
        'app',
        from_series='trusty',
        to_series='xenial',
    )
    mock_async_set_series.assert_called_once_with(
        'app', to_series='xenial')
    self.juju_status.assert_called()
    mock_async_prepare_series_upgrade.assert_has_calls([
        mock.call('0', to_series='xenial'),
        mock.call('1', to_series='xenial'),
        mock.call('2', to_series='xenial'),
    ])
    mock_maybe_pause_things.assert_called()
    mock_series_upgrade_machine.assert_has_calls([
        mock.call(
            '0',
            origin='openstack-origin',
            application='app',
            files=None,
            workaround_script=None,
            post_upgrade_functions=None),
        mock.call(
            '1',
            origin='openstack-origin',
            application='app',
            files=None,
            workaround_script=None,
            post_upgrade_functions=None),
        mock.call(
            '2',
            origin='openstack-origin',
            application='app',
            files=None,
            workaround_script=None,
            post_upgrade_functions=None),
    ])
    mock_post_application_upgrade_functions.assert_called_once_with(None)
@mock.patch.object(upgrade_utils, 'add_confdef_file')
@mock.patch.object(upgrade_utils, 'remove_confdef_file')
@mock.patch.object(
    upgrade_utils.series_upgrade_utils, 'async_complete_series_upgrade')
@mock.patch.object(upgrade_utils, 'reboot')
@mock.patch.object(upgrade_utils, 'async_do_release_upgrade')
@mock.patch.object(upgrade_utils, 'async_dist_upgrade')
async def test_series_upgrade_machine(
    self,
    mock_async_dist_upgrade,
    mock_async_do_release_upgrade,
    mock_reboot,
    mock_async_complete_series_upgrade,
    mock_remove_confdef_file,
    mock_add_confdef_file
):
    """Per-machine flow: dist-upgrade, release-upgrade, reboot, complete."""
    await upgrade_utils.series_upgrade_machine(
        '1',
        post_upgrade_functions=None,
        pre_upgrade_functions=None,
        files=None,
        workaround_script=None)
    mock_async_dist_upgrade.assert_called_once_with('1')
    mock_async_do_release_upgrade.assert_called_once_with('1')
    mock_reboot.assert_called_once_with('1')
    mock_async_complete_series_upgrade.assert_called_once_with('1')
    mock_remove_confdef_file.assert_called_once_with('1')
    mock_add_confdef_file.assert_called_once_with('1')
@mock.patch.object(upgrade_utils, 'add_confdef_file')
@mock.patch.object(upgrade_utils, 'remove_confdef_file')
@mock.patch.object(upgrade_utils.os_utils, 'async_set_origin')
@mock.patch.object(
    upgrade_utils.series_upgrade_utils, 'async_complete_series_upgrade')
@mock.patch.object(upgrade_utils, 'reboot')
@mock.patch.object(upgrade_utils, 'async_do_release_upgrade')
@mock.patch.object(upgrade_utils, 'async_dist_upgrade')
async def test_series_upgrade_machine_with_source(
    self,
    mock_async_dist_upgrade,
    mock_async_do_release_upgrade,
    mock_reboot,
    mock_async_complete_series_upgrade,
    mock_async_set_origin,
    mock_remove_confdef_file,
    mock_add_confdef_file
):
    """Supplying an origin additionally updates the charm's origin opt."""
    await upgrade_utils.series_upgrade_machine(
        '1',
        origin='openstack-origin',
        application='app',
        post_upgrade_functions=None,
        pre_upgrade_functions=None,
        files=None,
        workaround_script=None)
    mock_async_dist_upgrade.assert_called_once_with('1')
    mock_async_do_release_upgrade.assert_called_once_with('1')
    mock_reboot.assert_called_once_with('1')
    mock_async_complete_series_upgrade.assert_called_once_with('1')
    mock_async_set_origin.assert_called_once_with(
        'app', 'openstack-origin')
    mock_remove_confdef_file.assert_called_once_with('1')
    mock_add_confdef_file.assert_called_once_with('1')
@mock.patch("asyncio.gather")
async def test_maybe_pause_things_primary(self, mock_gather):
async def _gather(*args):
for f in args:
await f
mock_gather.side_effect = _gather
await upgrade_utils.maybe_pause_things(
FAKE_STATUS,
['app/1', 'app/2'],
pause_non_leader_subordinate=False,
pause_non_leader_primary=True)
self.async_run_action.assert_has_calls([
mock.call('app/1', "pause", action_params={}),
mock.call('app/2', "pause", action_params={}),
])
@mock.patch("asyncio.gather")
async def test_maybe_pause_things_subordinates(self, mock_gather):
async def _gather(*args):
for f in args:
await f
mock_gather.side_effect = _gather
await upgrade_utils.maybe_pause_things(
FAKE_STATUS,
['app/1', 'app/2'],
pause_non_leader_subordinate=True,
pause_non_leader_primary=False)
self.async_run_action.assert_has_calls([
mock.call('app-hacluster/1', "pause", action_params={}),
mock.call('app-hacluster/2', "pause", action_params={}),
])
@mock.patch("asyncio.gather")
async def test_maybe_pause_things_all(self, mock_gather):
async def _gather(*args):
for f in args:
await f
mock_gather.side_effect = _gather
await upgrade_utils.maybe_pause_things(
FAKE_STATUS,
['app/1', 'app/2'],
pause_non_leader_subordinate=True,
pause_non_leader_primary=True)
self.async_run_action.assert_has_calls([
mock.call('app-hacluster/1', "pause", action_params={}),
mock.call('app/1', "pause", action_params={}),
mock.call('app-hacluster/2', "pause", action_params={}),
mock.call('app/2', "pause", action_params={}),
])
async def test_maybe_pause_things_none(self):
await upgrade_utils.maybe_pause_things(
FAKE_STATUS,
['app/1', 'app/2'],
pause_non_leader_subordinate=False,
pause_non_leader_primary=False)
self.async_run_action.assert_not_called()
async def test_add_confdef_file(self):
await upgrade_utils.add_confdef_file('1')
cmd = (
"""echo """
"""'DPkg::options { "--force-confdef"; "--force-confnew"; }' | """
"""sudo tee /etc/apt/apt.conf.d/local"""
)
self.async_run_on_machine.assert_called_once_with(
'1', cmd
)
async def test_remove_confdef_file(self):
await upgrade_utils.remove_confdef_file('1')
self.async_run_on_machine.assert_called_once_with(
'1', 'sudo rm /etc/apt/apt.conf.d/local'
)
async def test_async_do_release_upgrade(self):
await upgrade_utils.async_do_release_upgrade('1')
do_release_upgrade_cmd = (
'yes | sudo DEBIAN_FRONTEND=noninteractive '
'do-release-upgrade -f DistUpgradeViewNonInteractive')
self.async_run_on_machine.assert_called_once_with(
'1', do_release_upgrade_cmd, timeout='120m'
)
async def test_prepare_series_upgrade(self):
await upgrade_utils.prepare_series_upgrade(
'1', to_series='xenial'
)
self.async_prepare_series_upgrade.assert_called_once_with(
'1', to_series='xenial'
)
async def test_reboot(self):
await upgrade_utils.reboot('1')
self.async_run_on_machine.assert_called_once_with(
'1', 'sudo init 6 & exit'
)
async def test_async_dist_upgrade(self):
await upgrade_utils.async_dist_upgrade('1')
apt_update_command = (
"""yes | sudo DEBIAN_FRONTEND=noninteractive """
"""apt-get --assume-yes """
"""-o "Dpkg::Options::=--force-confdef" """
"""-o "Dpkg::Options::=--force-confold" dist-upgrade""")
self.async_run_on_machine.assert_has_calls([
mock.call('1', 'sudo apt-get update'),
mock.call('1', apt_update_command),
])

View File

@@ -0,0 +1,275 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unit_tests.utils as ut_utils
import zaza.openstack.utilities.generic as generic_utils
import zaza.openstack.utilities.series_upgrade as series_upgrade_utils
# Canned Juju status for application 'app': three units (app/0 is the
# leader, one unit per machine), each carrying an 'app-hacluster'
# subordinate. Used by the series-upgrade tests below.
FAKE_STATUS = {
    'can-upgrade-to': '',
    'charm': 'local:trusty/app-136',
    'subordinate-to': [],
    'units': {'app/0': {'leader': True,
                        'machine': '0',
                        'subordinates': {
                            'app-hacluster/0': {
                                'charm': 'local:trusty/hacluster-0',
                                'leader': True}}},
              'app/1': {'machine': '1',
                        'subordinates': {
                            'app-hacluster/1': {
                                'charm': 'local:trusty/hacluster-0'}}},
              'app/2': {'machine': '2',
                        'subordinates': {
                            'app-hacluster/2': {
                                'charm': 'local:trusty/hacluster-0'}}}}}
class TestSeriesUpgrade(ut_utils.BaseTestCase):
    """Tests for zaza.openstack.utilities.series_upgrade helpers."""

    def setUp(self):
        """Patch out subprocess, ssh and the Juju model layer."""
        super(TestSeriesUpgrade, self).setUp()
        # Patch all subprocess calls
        self.patch(
            'zaza.openstack.utilities.generic.subprocess',
            new_callable=mock.MagicMock(),
            name='subprocess'
        )
        self.patch_object(generic_utils, "run_via_ssh")
        # Juju Status Object and data
        self.juju_status = mock.MagicMock()
        self.juju_status.applications.__getitem__.return_value = FAKE_STATUS
        self.patch_object(series_upgrade_utils, "model")
        self.model.get_status.return_value = self.juju_status

    def test_series_upgrade(self):
        """series_upgrade runs prepare, release-upgrade, complete, reboot."""
        self.patch_object(
            series_upgrade_utils.model, "block_until_all_units_idle")
        self.patch_object(
            series_upgrade_utils.model, "block_until_unit_wl_status")
        self.patch_object(series_upgrade_utils.model, "prepare_series_upgrade")
        self.patch_object(
            series_upgrade_utils.model, "complete_series_upgrade")
        self.patch_object(series_upgrade_utils.model, "set_series")
        self.patch_object(generic_utils, "set_origin")
        self.patch_object(series_upgrade_utils, "wrap_do_release_upgrade")
        self.patch_object(generic_utils, "reboot")
        _unit = "app/2"
        _application = "app"
        _machine_num = "4"
        _from_series = "xenial"
        _to_series = "bionic"
        _origin = "source"
        _files = ["filename", "scriptname"]
        _workaround_script = "scriptname"
        series_upgrade_utils.series_upgrade(
            _unit, _machine_num, origin=_origin,
            to_series=_to_series, from_series=_from_series,
            workaround_script=_workaround_script, files=_files)
        # The previous `called_with()` here was a silent no-op (it merely
        # created a child mock); assert the call actually happened.
        self.assertTrue(self.block_until_all_units_idle.called)
        self.prepare_series_upgrade.assert_called_once_with(
            _machine_num, to_series=_to_series)
        self.wrap_do_release_upgrade.assert_called_once_with(
            _unit, to_series=_to_series, from_series=_from_series,
            workaround_script=_workaround_script, files=_files)
        self.complete_series_upgrade.assert_called_once_with(_machine_num)
        self.set_series.assert_called_once_with(_application, _to_series)
        self.set_origin.assert_called_once_with(_application, _origin)
        self.reboot.assert_called_once_with(_unit)

    def test_series_upgrade_application_pause_peers_and_subordinates(self):
        """Peers and subordinates are paused before each unit upgrade."""
        self.patch_object(series_upgrade_utils.model, "run_action")
        self.patch_object(series_upgrade_utils, "series_upgrade")
        _application = "app"
        _from_series = "xenial"
        _to_series = "bionic"
        _origin = "source"
        _files = ["filename", "scriptname"]
        _workaround_script = "scriptname"
        _completed_machines = []
        # Peers and Subordinates
        _run_action_calls = [
            mock.call("{}-hacluster/1".format(_application),
                      "pause", action_params={}),
            mock.call("{}/1".format(_application), "pause", action_params={}),
            mock.call("{}-hacluster/2".format(_application),
                      "pause", action_params={}),
            mock.call("{}/2".format(_application), "pause", action_params={}),
        ]
        _series_upgrade_calls = []
        for machine_num in ("0", "1", "2"):
            _series_upgrade_calls.append(
                mock.call("{}/{}".format(_application, machine_num),
                          machine_num, origin=_origin,
                          from_series=_from_series, to_series=_to_series,
                          workaround_script=_workaround_script, files=_files,
                          post_upgrade_functions=None),
            )
        # Pause primary peers and subordinates.  (A stray trailing comma
        # previously made this statement a discarded one-element tuple.)
        series_upgrade_utils.series_upgrade_application(
            _application, origin=_origin,
            to_series=_to_series, from_series=_from_series,
            pause_non_leader_primary=True,
            pause_non_leader_subordinate=True,
            completed_machines=_completed_machines,
            workaround_script=_workaround_script, files=_files)
        self.run_action.assert_has_calls(_run_action_calls)
        self.series_upgrade.assert_has_calls(_series_upgrade_calls)

    def test_series_upgrade_application_pause_subordinates(self):
        """Only subordinates are paused when primaries are left running."""
        self.patch_object(series_upgrade_utils.model, "run_action")
        self.patch_object(series_upgrade_utils, "series_upgrade")
        _application = "app"
        _from_series = "xenial"
        _to_series = "bionic"
        _origin = "source"
        _files = ["filename", "scriptname"]
        _workaround_script = "scriptname"
        _completed_machines = []
        # Subordinates only
        _run_action_calls = [
            mock.call("{}-hacluster/1".format(_application),
                      "pause", action_params={}),
            mock.call("{}-hacluster/2".format(_application),
                      "pause", action_params={}),
        ]
        _series_upgrade_calls = []
        for machine_num in ("0", "1", "2"):
            _series_upgrade_calls.append(
                mock.call("{}/{}".format(_application, machine_num),
                          machine_num, origin=_origin,
                          from_series=_from_series, to_series=_to_series,
                          workaround_script=_workaround_script, files=_files,
                          post_upgrade_functions=None),
            )
        # Pause subordinates.  (A stray trailing comma previously made
        # this statement a discarded one-element tuple.)
        series_upgrade_utils.series_upgrade_application(
            _application, origin=_origin,
            to_series=_to_series, from_series=_from_series,
            pause_non_leader_primary=False,
            pause_non_leader_subordinate=True,
            completed_machines=_completed_machines,
            workaround_script=_workaround_script, files=_files)
        self.run_action.assert_has_calls(_run_action_calls)
        self.series_upgrade.assert_has_calls(_series_upgrade_calls)

    def test_series_upgrade_application_no_pause(self):
        """No pause actions are run when both pause flags are off."""
        self.patch_object(series_upgrade_utils.model, "run_action")
        self.patch_object(series_upgrade_utils, "series_upgrade")
        _application = "app"
        _from_series = "xenial"
        _to_series = "bionic"
        _origin = "source"
        _series_upgrade_calls = []
        _files = ["filename", "scriptname"]
        _workaround_script = "scriptname"
        _completed_machines = []
        for machine_num in ("0", "1", "2"):
            _series_upgrade_calls.append(
                mock.call("{}/{}".format(_application, machine_num),
                          machine_num, origin=_origin,
                          from_series=_from_series, to_series=_to_series,
                          workaround_script=_workaround_script, files=_files,
                          post_upgrade_functions=None),
            )
        # No pausing
        series_upgrade_utils.series_upgrade_application(
            _application, origin=_origin,
            to_series=_to_series, from_series=_from_series,
            pause_non_leader_primary=False,
            pause_non_leader_subordinate=False,
            completed_machines=_completed_machines,
            workaround_script=_workaround_script, files=_files)
        self.run_action.assert_not_called()
        self.series_upgrade.assert_has_calls(_series_upgrade_calls)

    def test_dist_upgrade(self):
        """dist_upgrade updates the package index then dist-upgrades."""
        _unit = "app/2"
        series_upgrade_utils.dist_upgrade(_unit)
        dist_upgrade_cmd = (
            """sudo DEBIAN_FRONTEND=noninteractive apt --assume-yes """
            """-o "Dpkg::Options::=--force-confdef" """
            """-o "Dpkg::Options::=--force-confold" dist-upgrade""")
        self.model.run_on_unit.assert_has_calls([
            mock.call(_unit, 'sudo apt update'),
            mock.call(_unit, dist_upgrade_cmd)])

    def test_do_release_upgrade(self):
        """do_release_upgrade runs non-interactively over ssh."""
        _unit = "app/2"
        series_upgrade_utils.do_release_upgrade(_unit)
        self.run_via_ssh.assert_called_once_with(
            _unit,
            'DEBIAN_FRONTEND=noninteractive do-release-upgrade '
            '-f DistUpgradeViewNonInteractive')

    def test_wrap_do_release_upgrade(self):
        """Workaround files are copied and run before the release upgrade."""
        self.patch_object(series_upgrade_utils, "do_release_upgrade")
        self.patch_object(series_upgrade_utils.model, "scp_to_unit")
        _unit = "app/2"
        _from_series = "xenial"
        _to_series = "bionic"
        _workaround_script = "scriptname"
        _files = ["filename", _workaround_script]
        _scp_calls = []
        _run_calls = [
            mock.call(_unit, _workaround_script)]
        for filename in _files:
            _scp_calls.append(mock.call(_unit, filename, filename))
        series_upgrade_utils.wrap_do_release_upgrade(
            _unit, to_series=_to_series, from_series=_from_series,
            workaround_script=_workaround_script, files=_files)
        self.scp_to_unit.assert_has_calls(_scp_calls)
        self.run_via_ssh.assert_has_calls(_run_calls)
        self.do_release_upgrade.assert_called_once_with(_unit)

    def test_app_config_openstack_charm(self):
        """OpenStack charms default to openstack-origin and full pausing."""
        upgrade = series_upgrade_utils.async_series_upgrade_application
        expected = {
            'origin': 'openstack-origin',
            'pause_non_leader_subordinate': True,
            'pause_non_leader_primary': True,
            'upgrade_function': upgrade,
            'post_upgrade_functions': [],
        }
        config = series_upgrade_utils.app_config('keystone')
        self.assertEqual(expected, config)

    def test_app_config_mongo(self):
        """mongodb upgrades non-leaders first and has no origin option."""
        upgrade = series_upgrade_utils.async_series_upgrade_non_leaders_first
        expected = {
            'origin': None,
            'pause_non_leader_subordinate': True,
            'pause_non_leader_primary': True,
            'upgrade_function': upgrade,
            'post_upgrade_functions': [],
        }
        config = series_upgrade_utils.app_config('mongodb')
        self.assertEqual(expected, config)

    def test_app_config_ceph(self):
        """ceph-mon uses 'source' and is upgraded without pausing."""
        upgrade = series_upgrade_utils.async_series_upgrade_application
        expected = {
            'origin': 'source',
            'pause_non_leader_subordinate': False,
            'pause_non_leader_primary': False,
            'upgrade_function': upgrade,
            'post_upgrade_functions': [],
        }
        config = series_upgrade_utils.app_config('ceph-mon')
        self.assertEqual(expected, config)

View File

@@ -0,0 +1,187 @@
import copy
import mock
import unit_tests.utils as ut_utils
import uuid
import zaza.model
import zaza.openstack.utilities.swift as swift_utils
import zaza.openstack.utilities.juju as juju_utils
import unit_tests.utilities.swift_test_data as swift_test_data
class TestSwiftUtils(ut_utils.BaseTestCase):
    """Tests for zaza.openstack.utilities.swift helpers."""
    def setUp(self):
        """Run standard test-case setup (no extra fixtures needed)."""
        super(TestSwiftUtils, self).setUp()
    def test_ObjectReplica_init(self):
        """'Server:Port Device' lines parse into server/port/device fields."""
        obj_rep = swift_utils.ObjectReplica(
            "Server:Port Device 10.5.0.38:6000 loop0")
        self.assertEqual(
            obj_rep.server,
            "10.5.0.38")
        self.assertEqual(
            obj_rep.port,
            "6000")
        self.assertEqual(
            obj_rep.device,
            "loop0")
        self.assertFalse(obj_rep.handoff_device)
        # A trailing '[Handoff]' marker flags the replica as a handoff.
        obj_rep = swift_utils.ObjectReplica(
            "Server:Port Device 10.5.0.9:6000 loop0 [Handoff]")
        self.assertTrue(obj_rep.handoff_device)
    def test_ObjectReplicas(self):
        """Replica placement is derived from swift-get-nodes output."""
        self.patch_object(zaza.model, 'run_on_leader')
        self.run_on_leader.return_value = {
            'Stdout': swift_test_data.SWIFT_GET_NODES_STDOUT}
        obj_replicas = swift_utils.ObjectReplicas(
            'swift-proxy-region1',
            'account123',
            'my-container',
            'my-object',
            swift_test_data.STORAGE_TOPOLOGY,
            'my-model')
        self.assertEqual(
            sorted(obj_replicas.hand_off_ips),
            ['10.5.0.15', '10.5.0.18', '10.5.0.34', '10.5.0.9'])
        self.assertEqual(
            sorted(obj_replicas.storage_ips),
            ['10.5.0.38', '10.5.0.4'])
        self.assertEqual(
            obj_replicas.placements,
            [
                {
                    'app_name': 'swift-storage-region2-zone3',
                    'region': 2,
                    'unit': 'swift-storage-region2-zone3/0',
                    'zone': 3},
                {
                    'app_name': 'swift-storage-region1-zone3',
                    'region': 1,
                    'unit': 'swift-storage-region1-zone3/0',
                    'zone': 3}])
        self.assertEqual(
            obj_replicas.distinct_regions,
            [1, 2])
        self.assertEqual(
            sorted(obj_replicas.all_zones),
            [(1, 3), (2, 3)])
        self.assertEqual(
            sorted(obj_replicas.distinct_zones),
            [(1, 3), (2, 3)])
    def test_get_swift_storage_topology(self):
        """Topology maps each storage unit address to region/zone details."""
        unit_r1z1_mock = mock.MagicMock(public_address='10.5.0.18')
        unit_r1z2_mock = mock.MagicMock(public_address='10.5.0.34')
        unit_r1z3_mock = mock.MagicMock(public_address='10.5.0.4')
        unit_r2z1_mock = mock.MagicMock(public_address='10.5.0.9')
        unit_r2z2_mock = mock.MagicMock(public_address='10.5.0.15')
        unit_r2z3_mock = mock.MagicMock(public_address='10.5.0.38')
        app_units = {
            'swift-storage-region1-zone1': [unit_r1z1_mock],
            'swift-storage-region1-zone2': [unit_r1z2_mock],
            'swift-storage-region1-zone3': [unit_r1z3_mock],
            'swift-storage-region2-zone1': [unit_r2z1_mock],
            'swift-storage-region2-zone2': [unit_r2z2_mock],
            'swift-storage-region2-zone3': [unit_r2z3_mock]}
        expected_topology = copy.deepcopy(swift_test_data.STORAGE_TOPOLOGY)
        self.patch_object(juju_utils, 'get_full_juju_status')
        self.patch_object(zaza.model, 'get_application_config')
        self.patch_object(zaza.model, 'get_units')
        juju_status = mock.MagicMock()
        juju_status.applications = {}
        self.get_full_juju_status.return_value = juju_status
        for app_name, units in app_units.items():
            expected_topology[units[0].public_address]['unit'] = units[0]
        app_config = {}
        for app_name in app_units.keys():
            juju_status.applications[app_name] = {'charm': 'cs:swift-storage'}
            # Region and zone numbers are encoded in the application name,
            # e.g. swift-storage-region1-zone3 -> region 1, zone 3.
            region = int(app_name.split('-')[2].replace('region', ''))
            zone = int(app_name.split('-')[3].replace('zone', ''))
            app_config[app_name] = {
                'storage-region': {'value': region},
                'zone': {'value': zone}}
        self.get_application_config.side_effect = \
            lambda x, model_name: app_config[x]
        self.get_units.side_effect = lambda x, model_name: app_units[x]
        self.assertEqual(
            swift_utils.get_swift_storage_topology(),
            expected_topology)
    def test_setup_test_container(self):
        """A uniquely named container is created; its domain id is returned."""
        swift_client = mock.MagicMock()
        self.patch_object(uuid, 'uuid1', return_value='auuid')
        swift_client.get_account.return_value = (
            {'x-account-project-domain-id': 'domain-id'},
            'bob-auuid-container')
        self.assertEqual(
            swift_utils.setup_test_container(swift_client, 'bob'),
            ('bob-auuid-container', 'domain-id'))
        swift_client.put_container.assert_called_once_with(
            'bob-auuid-container')
    def test_apply_proxy_config(self):
        """Changed options are pushed to the proxy application."""
        self.patch_object(zaza.model, 'block_until_all_units_idle')
        self.patch_object(
            zaza.model,
            'get_application_config',
            return_value={
                'go-faster': {
                    'value': False}})
        self.patch_object(zaza.model, 'set_application_config')
        swift_utils.apply_proxy_config(
            'proxy-app',
            {'go-faster': True})
        self.set_application_config.assert_called_once_with(
            'proxy-app', {'go-faster': True}, model_name=None)
    def test_apply_proxy_config_noop(self):
        """No config call is made when the value is already in place."""
        self.patch_object(zaza.model, 'block_until_all_units_idle')
        self.patch_object(
            zaza.model,
            'get_application_config',
            return_value={
                'go-faster': {
                    'value': True}})
        self.patch_object(zaza.model, 'set_application_config')
        swift_utils.apply_proxy_config(
            'proxy-app',
            {'go-faster': True})
        self.assertFalse(self.set_application_config.called)
    def test_create_object(self):
        """create_object stores a test object and returns its replicas."""
        self.patch_object(swift_utils, 'setup_test_container')
        self.setup_test_container.return_value = ('new-container', 'domain-id')
        self.patch_object(
            swift_utils,
            'ObjectReplicas',
            return_value='obj_replicas')
        swift_client = mock.MagicMock()
        self.assertEqual(
            swift_utils.create_object(
                swift_client,
                'proxy-app',
                swift_test_data.STORAGE_TOPOLOGY,
                'my-prefix'),
            ('new-container', 'zaza_test_object.txt', 'obj_replicas'))
        self.setup_test_container.assert_called_once_with(
            swift_client,
            'my-prefix')
        swift_client.put_object.assert_called_once_with(
            'new-container',
            'zaza_test_object.txt',
            content_type='text/plain',
            contents='File contents')
        self.ObjectReplicas.assert_called_once_with(
            'proxy-app',
            'domain-id',
            'new-container',
            'zaza_test_object.txt',
            swift_test_data.STORAGE_TOPOLOGY,
            model_name=None)

View File

@@ -0,0 +1,131 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mock
import pprint
import unit_tests.utils as ut_utils
import zaza.openstack.utilities.upgrade_utils as openstack_upgrade
class TestUpgradeUtils(ut_utils.BaseTestCase):
    """Tests for zaza.openstack.utilities.upgrade_utils."""

    def setUp(self):
        """Stub the model layer with canned config and status data."""
        super(TestUpgradeUtils, self).setUp()
        self.patch_object(
            openstack_upgrade.zaza.model,
            "get_units")
        self.juju_status = mock.MagicMock()
        self.patch_object(
            openstack_upgrade.zaza.model,
            "get_status",
            return_value=self.juju_status)
        self.patch_object(
            openstack_upgrade.zaza.model,
            "get_application_config")

        def _get_application_config(app, model_name=None):
            # Per-application charm config, as get_application_config
            # would return it for each deployed application.
            app_config = {
                'ceph-mon': {'verbose': True, 'source': 'old-src'},
                'neutron-openvswitch': {'verbose': True},
                'ntp': {'verbose': True},
                'percona-cluster': {'verbose': True, 'source': 'old-src'},
                'cinder': {
                    'verbose': True,
                    'openstack-origin': 'old-src',
                    'action-managed-upgrade': False},
                'neutron-api': {
                    'verbose': True,
                    'openstack-origin': 'old-src',
                    'action-managed-upgrade': False},
                'nova-compute': {
                    'verbose': True,
                    'openstack-origin': 'old-src',
                    'action-managed-upgrade': False},
            }
            return app_config[app]
        self.get_application_config.side_effect = _get_application_config
        self.juju_status.applications = {
            'mydb': {  # Filter as it is on UPGRADE_EXCLUDE_LIST
                'charm': 'cs:percona-cluster'},
            'neutron-openvswitch': {  # Filter as it is a subordinate
                'charm': 'cs:neutron-openvswitch',
                'subordinate-to': 'nova-compute'},
            'ntp': {  # Filter as it has no source option
                'charm': 'cs:ntp'},
            'nova-compute': {
                'charm': 'cs:nova-compute',
                'units': {
                    'nova-compute/0': {
                        'subordinates': {
                            'neutron-openvswitch/2': {
                                'charm': 'cs:neutron-openvswitch-22'}}}}},
            'cinder': {
                'charm': 'cs:cinder-23',
                'units': {
                    'cinder/1': {
                        'subordinates': {
                            'cinder-hacluster/0': {
                                'charm': 'cs:hacluster-42'},
                            'cinder-ceph/3': {
                                'charm': 'cs:cinder-ceph-2'}}}}}}

    def test_get_upgrade_candidates(self):
        """Every application in the status is returned as a candidate."""
        expected = copy.deepcopy(self.juju_status.applications)
        self.assertEqual(
            openstack_upgrade.get_upgrade_candidates(),
            expected)

    def test_get_upgrade_groups(self):
        """Payload upgrade groups exclude db, subordinate and ntp apps."""
        expected = [
            ('Database Services', []),
            ('Stateful Services', []),
            ('Core Identity', []),
            ('Control Plane', ['cinder']),
            ('Data Plane', ['nova-compute']),
            ('sweep_up', [])]
        # (Leftover pprint debug output removed.)
        self.assertEqual(
            openstack_upgrade.get_upgrade_groups(),
            expected)

    def test_get_series_upgrade_groups(self):
        """Series upgrade groups include db and source-less apps too."""
        expected = [
            ('Database Services', ['mydb']),
            ('Stateful Services', []),
            ('Core Identity', []),
            ('Control Plane', ['cinder']),
            ('Data Plane', ['nova-compute']),
            ('sweep_up', ['ntp'])]
        # (Leftover pprint debug output removed.)
        self.assertEqual(
            openstack_upgrade.get_series_upgrade_groups(),
            expected)

    def test_extract_charm_name_from_url(self):
        """Charm names are extracted from local: and cs: style URLs."""
        self.assertEqual(
            openstack_upgrade.extract_charm_name_from_url(
                'local:bionic/heat-12'),
            'heat')
        self.assertEqual(
            openstack_upgrade.extract_charm_name_from_url(
                'cs:bionic/heat-12'),
            'heat')
        self.assertEqual(
            openstack_upgrade.extract_charm_name_from_url('cs:heat'),
            'heat')

View File

@@ -19,6 +19,7 @@
"""Module to provide helper for writing unit tests."""
import asyncio
import contextlib
import io
import mock
@@ -96,3 +97,24 @@ class BaseTestCase(unittest.TestCase):
started.return_value = return_value
self._patches_start[name] = started
setattr(self, name, started)
class AioTestCase(BaseTestCase):
    """TestCase base that runs coroutine ``test_*`` methods on a loop."""
    def __init__(self, methodName='runTest', loop=None):
        """Initialise the test case.

        :param methodName: Name of the test method to run.
        :param loop: Optional event loop; defaults to the current loop.
        """
        self.loop = loop or asyncio.get_event_loop()
        # Cache of wrapped coroutine test methods so repeated attribute
        # access returns the same synchronous callable.
        self._function_cache = {}
        super(AioTestCase, self).__init__(methodName=methodName)
    def coroutine_function_decorator(self, func):
        """Wrap coroutine *func* so it runs to completion on self.loop."""
        def wrapper(*args, **kw):
            return self.loop.run_until_complete(func(*args, **kw))
        return wrapper
    def __getattribute__(self, item):
        # Transparently wrap async 'test_*' methods so the standard
        # (synchronous) unittest machinery can invoke them.
        attr = object.__getattribute__(self, item)
        if asyncio.iscoroutinefunction(attr) and item.startswith('test_'):
            if item not in self._function_cache:
                self._function_cache[item] = (
                    self.coroutine_function_decorator(attr))
            return self._function_cache[item]
        return attr

View File

@@ -0,0 +1,15 @@
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing aodh."""

View File

@@ -0,0 +1,241 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Aodh testing."""
import logging
import tenacity
import novaclient.exceptions
import zaza.model
import zaza.openstack.configure.guest
import zaza.openstack.charm_tests.glance.setup as glance_setup
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.generic as generic_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.configure.telemetry as telemetry_utils
class AodhTest(test_utils.OpenStackBaseTest):
    """Encapsulate Aodh tests."""
    # Prefix used to name (and later identify and clean up) resources.
    RESOURCE_PREFIX = 'zaza-aodhtests'
    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super(AodhTest, cls).setUpClass(application_name='aodh')
        cls.xenial_ocata = openstack_utils.get_os_release('xenial_ocata')
        cls.xenial_newton = openstack_utils.get_os_release('xenial_newton')
        cls.bionic_stein = openstack_utils.get_os_release('bionic_stein')
        cls.release = openstack_utils.get_os_release()
        cls.keystone_session = openstack_utils.get_overcloud_keystone_session()
        cls.model_name = zaza.model.get_juju_model()
        cls.aodh_client = openstack_utils.get_aodh_session_client(
            cls.keystone_session)
    @classmethod
    def tearDown(cls):
        """Remove test resources.

        NOTE(review): declared as a classmethod although unittest invokes
        tearDown after every test method -- confirm this is intended.
        """
        logging.info('Running teardown')
        cache_wait = False
        for alarm in cls.aodh_client.alarm.list():
            if alarm['name'].startswith(cls.RESOURCE_PREFIX):
                # Delete without waiting per-alarm; wait once for the
                # whole batch below.
                cache_wait = True
                logging.info('Removing Alarm {}'.format(alarm['name']))
                telemetry_utils.delete_alarm(
                    cls.aodh_client,
                    alarm['name'],
                    cache_wait=False)
        if cache_wait:
            logging.info('Waiting for alarm cache to clear')
            telemetry_utils.alarm_cache_wait()
    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
                    reraise=True, stop=tenacity.stop_after_attempt(8))
    def query_aodh_api(self):
        """Check that aodh api is responding."""
        self.aodh_client.alarm.list()
    @tenacity.retry(
        retry=tenacity.retry_if_result(lambda ret: ret is not None),
        wait=tenacity.wait_fixed(120),
        stop=tenacity.stop_after_attempt(2))
    def _retry_check_commands_on_units(self, cmds, units):
        """Run check_commands_on_units, retrying once if a check fails."""
        return generic_utils.check_commands_on_units(cmds, units)
    @property
    def services(self):
        """Return a list of the service that should be running."""
        if self.release >= self.xenial_ocata:
            services = [
                'apache2',
                'aodh-evaluator: AlarmEvaluationService worker(0)',
                'aodh-notifier: AlarmNotifierService worker(0)',
                ('aodh-listener: EventAlarmEvaluationService'
                 ' worker(0)')]
        elif self.release >= self.xenial_newton:
            services = [
                ('/usr/bin/python /usr/bin/aodh-api --port 8032 -- '
                 '--config-file=/etc/aodh/aodh.conf '
                 '--log-file=/var/log/aodh/aodh-api.log'),
                'aodh-evaluator - AlarmEvaluationService(0)',
                'aodh-notifier - AlarmNotifierService(0)',
                'aodh-listener - EventAlarmEvaluationService(0)']
        else:
            services = [
                'aodh-api',
                'aodh-evaluator',
                'aodh-notifier',
                'aodh-listener']
        return services
    def test_100_test_api(self):
        """Check api by creating an alarm."""
        alarm_name = '{}_test_api_alarm'.format(self.RESOURCE_PREFIX)
        logging.info('Creating alarm {}'.format(alarm_name))
        alarm = telemetry_utils.create_server_power_off_alarm(
            self.aodh_client,
            alarm_name,
            'some-uuid')
        alarm_state = telemetry_utils.get_alarm_state(
            self.aodh_client,
            alarm['alarm_id'])
        logging.info('alarm_state: {}'.format(alarm_state))
        # Until data is collected alarm come up in an 'insufficient data'
        # state.
        self.assertEqual(alarm_state, 'insufficient data')
    def test_900_restart_on_config_change(self):
        """Checking restart happens on config change.

        Change debug mode and assert that change propagates to the correct
        file and that services are restarted as a result
        """
        # Config file affected by juju set config change
        conf_file = '/etc/aodh/aodh.conf'
        # Make config change, check for service restarts
        self.restart_on_changed_debug_oslo_config_file(
            conf_file,
            self.services)
        self.query_aodh_api()
    def test_901_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped then resume and check
        they are started
        """
        with self.pause_resume(
                self.services,
                pgrep_full=False):
            logging.info("Testing pause resume")
        self.query_aodh_api()
    def test_902_nrpe_service_checks(self):
        """Confirm that the NRPE service check files are created."""
        units = zaza.model.get_units('aodh')
        cmds = []
        if self.release >= self.xenial_ocata:
            services = ['aodh-evaluator', 'aodh-notifier',
                        'aodh-listener', 'apache2']
        else:
            services = ['aodh-api', 'aodh-evaluator',
                        'aodh-notifier', 'aodh-listener']
        for check_name in services:
            cmds.append(
                'egrep -oh /usr/local.* /etc/nagios/nrpe.d/'
                'check_{}.cfg'.format(check_name)
            )
        ret = self._retry_check_commands_on_units(cmds, units)
        if ret:
            logging.info(ret)
        self.assertIsNone(ret, msg=ret)
class AodhServerAlarmTest(test_utils.OpenStackBaseTest):
    """Test server events trigger Aodh alarms."""

    # Prefix used to name (and later identify and clean up) resources.
    RESOURCE_PREFIX = 'zaza-aodhtests'

    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super(AodhServerAlarmTest, cls).setUpClass(application_name='aodh')
        # NOTE(review): cls.keystone_session is not set here; presumably it
        # is provided by OpenStackBaseTest.setUpClass -- confirm.
        cls.aodh_client = openstack_utils.get_aodh_session_client(
            cls.keystone_session)
        cls.nova_client = openstack_utils.get_nova_session_client(
            cls.keystone_session)
        cls.run_resource_cleanup = True

    @classmethod
    def resource_cleanup(cls):
        """Remove test alarms and servers created by this class."""
        logging.info('Running teardown')
        for alarm in cls.aodh_client.alarm.list():
            if alarm['name'].startswith(cls.RESOURCE_PREFIX):
                logging.info('Removing Alarm {}'.format(alarm['name']))
                telemetry_utils.delete_alarm(
                    cls.aodh_client,
                    alarm['name'],
                    cache_wait=False)
        for server in cls.nova_client.servers.list():
            if server.name.startswith(cls.RESOURCE_PREFIX):
                logging.info('Removing server {}'.format(server.name))
                openstack_utils.delete_resource(
                    cls.nova_client.servers,
                    server.id,
                    msg="server")

    def test_alarm_on_power_off(self):
        """Test server alarm is triggered when server is powered off."""
        server_name = '{}-server'.format(self.RESOURCE_PREFIX)
        alarm_name = '{}_instance_off'.format(self.RESOURCE_PREFIX)
        try:
            server = self.nova_client.servers.find(name=server_name)
            logging.info("Found existing server {}".format(server_name))
        except novaclient.exceptions.NotFound:
            logging.info("Launching new server {}".format(server_name))
            server = zaza.openstack.configure.guest.launch_instance(
                glance_setup.LTS_IMAGE_NAME,
                vm_name=server_name)
        # Use a unittest assertion rather than a bare `assert`, which is
        # stripped when Python runs with optimisations enabled (-O).
        self.assertEqual(
            server.status, 'ACTIVE',
            "Server {} not active".format(server.name))
        logging.info('Deleting alarm {} if it exists'.format(alarm_name))
        telemetry_utils.delete_alarm(
            self.aodh_client,
            alarm_name,
            cache_wait=True)
        logging.info('Creating alarm {}'.format(alarm_name))
        alarm_info = telemetry_utils.create_server_power_off_alarm(
            self.aodh_client,
            alarm_name,
            server.id)
        alarm_state = telemetry_utils.get_alarm_state(
            self.aodh_client,
            alarm_info['alarm_id'])
        logging.info('Alarm in state {}'.format(alarm_state))
        # Until data is collected alarm come up in an 'insufficient data'
        # state.
        self.assertEqual(alarm_state, 'insufficient data')
        logging.info('Stopping server {}'.format(server.name))
        server.stop()
        # Block until the power-off event has flipped the alarm state.
        telemetry_utils.block_until_alarm_state(
            self.aodh_client,
            alarm_info['alarm_id'])

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing barbican."""

View File

@@ -0,0 +1,81 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate barbican testing."""
import logging
import barbicanclient.client as barbican_client
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
class BarbicanTest(test_utils.OpenStackBaseTest):
    """Run barbican specific tests."""

    # Services exercised by the restart and pause/resume checks.
    _SERVICES = ['apache2', 'barbican-worker']

    def test_110_catalog_endpoints(self):
        """Verify that the endpoints are present in the catalog."""
        overcloud_auth = openstack_utils.get_overcloud_auth()
        keystone_client = openstack_utils.get_keystone_client(
            overcloud_auth)
        actual_endpoints = keystone_client.service_catalog.get_endpoints()
        for service_type in ('key-manager', 'identity'):
            actual_interfaces = [endpoint['interface'] for endpoint in
                                 actual_endpoints[service_type]]
            for expected_interface in ('internal', 'admin', 'public'):
                # Use a unittest assertion rather than a bare `assert`
                # (stripped under python -O) and get a useful message.
                self.assertIn(expected_interface, actual_interfaces)

    def test_400_api_connection(self):
        """Simple api calls to check service is up and responding."""
        logging.info('Authenticating with the barbican endpoint')
        overcloud_auth = openstack_utils.get_overcloud_auth()
        keystone_client = openstack_utils.get_keystone_client(
            overcloud_auth)
        keystone_session = openstack_utils.get_overcloud_keystone_session()
        barbican_endpoint = keystone_client.service_catalog.url_for(
            service_type='key-manager', interface='publicURL')
        barbican = barbican_client.Client(session=keystone_session,
                                          endpoint=barbican_endpoint)
        logging.info('Creating a secret')
        my_secret = barbican.secrets.create()
        my_secret.name = u'Random plain text password'
        my_secret.payload = u'password'
        logging.info('Storing the secret')
        my_secret_ref = my_secret.store()
        self.assertIsNotNone(my_secret_ref)
        logging.info('Deleting the secret')
        my_secret.delete()

    def test_900_restart_on_config_change(self):
        """Checking restart happens on config change.

        Change debug mode and assert that change propagates to the correct
        file and that services are restarted as a result
        """
        self.restart_on_changed_debug_oslo_config_file(
            '/etc/barbican/barbican.conf', self._SERVICES)

    def test_910_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped then resume and check
        they are started
        """
        with self.pause_resume(self._SERVICES):
            logging.info("Testing pause resume")
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing ceilometer."""

View File

@@ -0,0 +1,45 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for configuring Ceilometer."""
import logging
import zaza.model as zaza_model
import zaza.openstack.utilities.openstack as openstack_utils
def basic_setup():
    """Run setup for testing Ceilometer.

    Setup for testing Ceilometer is currently part of functional
    tests.
    """
    release_now = openstack_utils.get_os_release()
    release_ocata = openstack_utils.get_os_release('xenial_ocata')
    if release_now >= release_ocata:
        # The ceilometer-upgrade action is only available from Ocata on.
        logging.debug('Checking ceilometer-upgrade')
        return zaza_model.run_action_on_leader(
            'ceilometer',
            'ceilometer-upgrade',
            raise_on_failure=True)
    logging.info(
        'Skipping ceilometer-upgrade as it is not supported before ocata')

View File

@@ -0,0 +1,164 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Ceilometer testing."""
import copy
import logging
import ceilometerclient.v2.client as ceilo_client
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
class CeilometerTest(test_utils.OpenStackBaseTest):
    """Encapsulate Ceilometer tests."""

    CONF_FILE = '/etc/ceilometer/ceilometer.conf'

    # Pre-computed release comparison points used to select which services
    # are expected on the deployed OpenStack release.
    XENIAL_PIKE = openstack_utils.get_os_release('xenial_pike')
    XENIAL_OCATA = openstack_utils.get_os_release('xenial_ocata')
    XENIAL_NEWTON = openstack_utils.get_os_release('xenial_newton')
    XENIAL_MITAKA = openstack_utils.get_os_release('xenial_mitaka')
    TRUSTY_MITAKA = openstack_utils.get_os_release('trusty_mitaka')

    @classmethod
    def setUpClass(cls):
        """Run class setup for running Ceilometer tests."""
        super(CeilometerTest, cls).setUpClass()
        cls.current_release = openstack_utils.get_os_release()

    @property
    def services(self):
        """Return a list of services for the selected OpenStack release.

        The entries are in the exact format expected by the status/process
        checks (including worker suffixes), which varies per release.
        """
        services = []

        if self.application_name == 'ceilometer-agent':
            if self.current_release <= CeilometerTest.XENIAL_MITAKA:
                services.append('ceilometer-polling')
            else:
                services.append('ceilometer-polling: AgentManager worker(0)')
            return services

        # Note: disabling ceilometer-polling and ceilometer-agent-central due
        # to bug 1846390: https://bugs.launchpad.net/bugs/1846390
        if self.current_release >= CeilometerTest.XENIAL_PIKE:
            # services.append('ceilometer-polling: AgentManager worker(0)')
            services.append('ceilometer-agent-notification: '
                            'NotificationService worker(0)')
        elif self.current_release >= CeilometerTest.XENIAL_OCATA:
            services.append('ceilometer-collector: CollectorService worker(0)')
            # services.append('ceilometer-polling: AgentManager worker(0)')
            services.append('ceilometer-agent-notification: '
                            'NotificationService worker(0)')
            services.append('apache2')
        elif self.current_release >= CeilometerTest.XENIAL_NEWTON:
            services.append('ceilometer-collector - CollectorService(0)')
            # services.append('ceilometer-polling - AgentManager(0)')
            services.append('ceilometer-agent-notification - '
                            'NotificationService(0)')
            services.append('ceilometer-api')
        else:
            services.append('ceilometer-collector')
            services.append('ceilometer-api')
            services.append('ceilometer-agent-notification')

            if self.current_release < CeilometerTest.TRUSTY_MITAKA:
                services.append('ceilometer-alarm-notifier')
                services.append('ceilometer-alarm-evaluator')

        return services

    @property
    def restartable_services(self):
        """Return a list of services that are known to be restartable.

        For the selected OpenStack release these services are known to be able
        to be stopped and started with no issues.
        """
        # Due to Bug #1861321 ceilometer-collector does not reliably
        # restart.
        _services = copy.deepcopy(self.services)
        if self.current_release <= CeilometerTest.TRUSTY_MITAKA:
            try:
                _services.remove('ceilometer-collector')
            except ValueError:
                # Not in the list for this deployment; nothing to remove.
                pass
        return _services

    def test_400_api_connection(self):
        """Simple api calls to check service is up and responding."""
        # The ceilometer API was removed in Ocata (moved to gnocchi/aodh).
        if self.current_release >= CeilometerTest.XENIAL_OCATA:
            logging.info('Skipping API checks as ceilometer api has been '
                         'removed')
            return

        logging.info('Instantiating ceilometer client...')
        ceil = ceilo_client.Client(
            session=openstack_utils.get_overcloud_keystone_session()
        )

        logging.info('Checking api functionality...')
        assert(ceil.samples.list() == [])
        assert(ceil.meters.list() == [])

    def test_900_restart_on_config_change(self):
        """Checking restart happens on config change."""
        # ceilometer-agent has no 'debug' option; toggle
        # use-internal-endpoints instead.
        config_name = 'debug'
        if self.application_name == 'ceilometer-agent':
            config_name = 'use-internal-endpoints'

        # Expected default and alternate values
        current_value = openstack_utils.get_application_config_option(
            self.application_name, config_name
        )
        assert type(current_value) == bool
        new_value = not current_value

        # Convert bool to str
        current_value = str(current_value)
        new_value = str(new_value)

        set_default = {config_name: current_value}
        set_alternate = {config_name: new_value}
        default_entry = {'DEFAULT': {'debug': [current_value]}}
        alternate_entry = {'DEFAULT': {'debug': [new_value]}}
        if self.application_name == 'ceilometer-agent':
            # The agent option maps to a different section/key in the
            # rendered configuration file.
            default_entry = None
            alternate_entry = {
                'service_credentials': {'interface': ['internalURL']}
            }

        logging.info('changing config: {}'.format(set_alternate))
        self.restart_on_changed(
            CeilometerTest.CONF_FILE,
            set_default,
            set_alternate,
            default_entry,
            alternate_entry,
            self.restartable_services)

    def test_901_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped then resume and check
        they are started.
        """
        with self.pause_resume(self.restartable_services):
            logging.info("Testing pause and resume")

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for benchmarking ceph."""

View File

@@ -0,0 +1,124 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ceph Benchmark Tests."""
import logging
import re
import unittest
import zaza.model
class BenchmarkTests(unittest.TestCase):
"""Ceph Bencharmk Tests."""
@classmethod
def setUpClass(cls):
"""Run class setup for running ceph benchmark tests."""
super().setUpClass()
cls.results_match = "^[A-Z].*"
cls.pool = "zaza_benchmarks"
cls.test_results = {}
cls.time_in_secs = 30
def parse_bench_results(self, results_string):
"""Parse bench results from string.
:param results string: Output from rados bench command.
With newlines due to juju run's output.
:type results_string: string
:returns: Dictionary of results summary
:rtype: dict
"""
_results = {}
_lines = results_string.split("\n")
for _line in _lines:
_line = _line.strip()
if re.match(self.results_match, _line):
_keyvalues = _line.split(":")
try:
_results[_keyvalues[0].strip()] = _keyvalues[1].strip()
except IndexError:
# Skipping detailed output for summary details
pass
return _results
def run_rados_bench(self, action, params=None):
"""Run rados bench.
:param action: String rados bench command i.e. write, rand, seq
:type action: string
:param params: List of string extra parameters to rados bench command
:type params: List[strings]
:returns: Unit run dict result
:rtype: dict
"""
_cmd = "rados bench -p {} {} {}".format(
self.pool, self.time_in_secs, action)
if params:
_cmd += " "
_cmd += " ".join(params)
logging.info(
"Running '{}' for {} seconds ...".format(_cmd, self.time_in_secs))
_result = zaza.model.run_on_leader(
"ceph-mon", _cmd, timeout=self.time_in_secs + 60)
return _result
def test_001_create_pool(self):
"""Create ceph pool."""
_cmd = "ceph osd pool create {} 100 100".format(self.pool)
_result = zaza.model.run_on_leader(
"ceph-mon", _cmd)
if _result.get("Code") and not _result.get("Code").startswith('0'):
if "already exists" in _result.get("Stderr", ""):
logging.warning(
"Ceph osd pool {} already exits.".format(self.pool))
else:
logging.error("Ceph osd pool create failed")
raise Exception(_result.get("Stderr", ""))
def test_100_rados_bench_write(self):
"""Rados bench write test."""
_result = self.run_rados_bench("write", params=["--no-cleanup"])
self.test_results["write"] = (
self.parse_bench_results(_result.get("Stdout", "")))
def test_200_rados_bench_read_seq(self):
"""Rados bench read sequential test."""
_result = self.run_rados_bench("seq")
self.test_results["read_seq"] = (
self.parse_bench_results(_result.get("Stdout", "")))
def test_300_rados_bench_read_rand(self):
"""Rados bench read random test."""
_result = self.run_rados_bench("rand")
self.test_results["read_rand"] = (
self.parse_bench_results(_result.get("Stdout", "")))
def test_998_rados_cleanup(self):
"""Cleanup rados bench data."""
_cmd = "rados -p {} cleanup".format(self.pool)
_result = zaza.model.run_on_leader("ceph-mon", _cmd)
if _result.get("Code") and not _result.get("Code").startswith('0'):
logging.warning("rados cleanup failed")
def test_999_print_rados_bench_results(self):
"""Print rados bench results."""
print("######## Begin Ceph Results ########")
for test, results in self.test_results.items():
print("##### {} ######".format(test))
for key, value in results.items():
print("{}: {}".format(key, value))
print("######## End Ceph Results ########")

View File

@@ -0,0 +1,15 @@
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing ``ceph-dashboard``."""

View File

@@ -0,0 +1,97 @@
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulating `ceph-dashboard` testing."""
import collections
import os
import requests
import zaza
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.utilities.deployment_env as deployment_env
class CephDashboardTest(test_utils.BaseCharmTest):
    """Class for `ceph-dashboard` tests."""

    # CA certificate installed on the dashboard units (via the vault
    # relation) that signs the dashboard's TLS certificate.
    REMOTE_CERT_FILE = ('/usr/local/share/ca-certificates/'
                        'vault_ca_cert_dashboard.crt')

    @classmethod
    def setUpClass(cls):
        """Run class setup for running ceph dashboard tests."""
        super().setUpClass()
        cls.application_name = 'ceph-dashboard'
        cls.local_ca_cert = cls.collect_ca()

    @classmethod
    def collect_ca(cls):
        """Collect CA from ceph-dashboard unit.

        Copy the CA certificate from the first dashboard unit into the
        deployment tmp dir (only once per run) and return the local path.

        :returns: Path to local copy of the CA certificate.
        :rtype: str
        """
        local_ca_cert = os.path.join(
            deployment_env.get_tmpdir(),
            os.path.basename(cls.REMOTE_CERT_FILE))
        if not os.path.isfile(local_ca_cert):
            units = zaza.model.get_units(cls.application_name)
            zaza.model.scp_from_unit(
                units[0].entity_id,
                cls.REMOTE_CERT_FILE,
                local_ca_cert)
        return local_ca_cert

    def test_dashboard_units(self):
        """Check dashboard units are configured correctly.

        Exactly one unit is expected to serve the dashboard (HTTP 200);
        all other units redirect to it (HTTP 303).
        """
        # XXX: Switch to using CA for verification when
        # https://bugs.launchpad.net/cloud-archive/+bug/1933410
        # is fix released.
        # verify = self.local_ca_cert
        verify = False
        units = zaza.model.get_units(self.application_name)
        rcs = collections.defaultdict(list)
        for unit in units:
            r = requests.get(
                'https://{}:8443'.format(unit.public_address),
                verify=verify,
                allow_redirects=False)
            rcs[r.status_code].append(unit.public_address)
        self.assertEqual(len(rcs[requests.codes.ok]), 1)
        self.assertEqual(len(rcs[requests.codes.see_other]), len(units) - 1)

    def create_user(self, username, role='administrator'):
        """Create a dashboard user.

        :param username: Username to create.
        :type username: str
        :param role: Role to grant to user.
        :type role: str
        :returns: Results from action.
        :rtype: juju.action.Action
        """
        action = zaza.model.run_action_on_leader(
            'ceph-dashboard',
            'add-user',
            action_params={
                'username': username,
                'role': role})
        return action

    def test_create_user(self):
        """Test create user action."""
        test_user = 'marvin'
        action = self.create_user(test_user)
        self.assertEqual(action.status, "completed")
        # A generated password must be returned for the new user.
        self.assertTrue(action.data['results']['password'])
        action = self.create_user(test_user)
        # Action should fail as the user already exists
        self.assertEqual(action.status, "failed")

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing ceph-fs."""

View File

@@ -0,0 +1,124 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate CephFS testing."""
import logging
from tenacity import Retrying, stop_after_attempt, wait_exponential
import zaza.model as model
import zaza.openstack.charm_tests.neutron.tests as neutron_tests
import zaza.openstack.charm_tests.nova.utils as nova_utils
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.configure.guest as guest
import zaza.openstack.utilities.openstack as openstack_utils
class CephFSTests(test_utils.OpenStackBaseTest):
    """Encapsulate CephFS tests."""

    RESOURCE_PREFIX = 'zaza-cephfstests'

    # cloud-init userdata: installs ceph-fuse, fstab-mounts the CephFS
    # filesystem at /mnt/cephfs and writes out the cluster config/keyring.
    # The two '{}' placeholders are filled with the (8-space indented)
    # ceph.conf and admin keyring contents collected from ceph-mon.
    INSTANCE_USERDATA = """#cloud-config
packages:
- ceph-fuse
- python
mounts:
- [ 'none', '/mnt/cephfs', 'fuse.ceph', 'ceph.id=admin,ceph.conf=/etc/ceph/ceph.conf,_netdev,defaults', '0', '0' ]
write_files:
-   content: |
{}
    path: /etc/ceph/ceph.conf
-   content: |
{}
    path: /etc/ceph/ceph.client.admin.keyring
"""  # noqa

    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super(CephFSTests, cls).setUpClass()

    def test_cephfs_share(self):
        """Test that CephFS shares can be accessed on two instances.

        1. Spawn two servers
        2. mount it on both
        3. write a file on one
        4. read it on the other
        5. profit
        """
        keyring = model.run_on_leader(
            'ceph-mon', 'cat /etc/ceph/ceph.client.admin.keyring')['Stdout']
        conf = model.run_on_leader(
            'ceph-mon', 'cat /etc/ceph/ceph.conf')['Stdout']
        # Spawn Servers
        instance_1, instance_2 = self.launch_guests(
            userdata=self.INSTANCE_USERDATA.format(
                _indent(conf, 8),
                _indent(keyring, 8)))

        # Write a file on instance_1

        def verify_setup(stdin, stdout, stderr):
            # Fail if the remote command exited non-zero.
            status = stdout.channel.recv_exit_status()
            self.assertEqual(status, 0)

        fip_1 = neutron_tests.floating_ips_from_instance(instance_1)[0]
        fip_2 = neutron_tests.floating_ips_from_instance(instance_2)[0]
        username = guest.boot_tests['bionic']['username']
        password = guest.boot_tests['bionic'].get('password')
        privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME)

        # Retry with backoff: the guest may still be finishing cloud-init
        # (installing ceph-fuse) when we first try to ssh in.
        for attempt in Retrying(
                stop=stop_after_attempt(3),
                wait=wait_exponential(multiplier=1, min=2, max=10)):
            with attempt:
                openstack_utils.ssh_command(
                    username, fip_1, 'instance-1',
                    'sudo mount -a && '
                    'echo "test" | sudo tee /mnt/cephfs/test',
                    password=password, privkey=privkey, verify=verify_setup)

        def verify(stdin, stdout, stderr):
            # The file written on instance-1 must be readable, with the
            # same content, on instance-2 via the shared filesystem.
            status = stdout.channel.recv_exit_status()
            self.assertEqual(status, 0)
            out = ""
            for line in iter(stdout.readline, ""):
                out += line
            self.assertEqual(out, "test\n")

        openstack_utils.ssh_command(
            username, fip_2, 'instance-2',
            'sudo mount -a && '
            'sudo cat /mnt/cephfs/test',
            password=password, privkey=privkey, verify=verify)
def _indent(text, amount, ch=' '):
padding = amount * ch
return ''.join(padding+line for line in text.splitlines(True))
class CharmOperationTest(test_utils.BaseCharmTest):
    """CephFS Charm operation tests."""

    def test_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped, then resume and check
        they are started.
        """
        # The metadata server is the only payload service on a ceph-fs unit.
        services = ['ceph-mds']
        with self.pause_resume(services):
            logging.info('Testing pause resume (services="{}")'
                         .format(services))

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing ``ceph-iscsi``."""

View File

@@ -0,0 +1,30 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for ceph iscsi gateway deployments."""
import zaza.model
def basic_guest_setup():
    """Run basic setup for iscsi guest.

    Install and start the iSCSI initiator tooling on every unit of the
    'ubuntu' application so they can log in to the gateway targets.
    """
    setup_cmds = (
        "apt install --yes open-iscsi multipath-tools",
        "systemctl start iscsi",
        "systemctl start iscsid",
    )
    for guest_unit in zaza.model.get_units('ubuntu'):
        for setup_cmd in setup_cmds:
            zaza.model.run_on_unit(guest_unit.entity_id, setup_cmd)

View File

@@ -0,0 +1,311 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulating `ceph-iscsi` testing."""
import logging
import tempfile
import zaza
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.generic as generic_utils
class CephISCSIGatewayTest(test_utils.BaseCharmTest):
"""Class for `ceph-iscsi` tests."""
GW_IQN = "iqn.2003-03.com.canonical.iscsi-gw:iscsi-igw"
DATA_POOL_NAME = 'zaza_rep_pool'
EC_PROFILE_NAME = 'zaza_iscsi'
EC_DATA_POOL = 'zaza_ec_data_pool'
EC_METADATA_POOL = 'zaza_ec_metadata_pool'
def get_client_initiatorname(self, unit):
"""Return the initiatorname for the given unit.
:param unit_name: Name of unit to match
:type unit: str
:returns: Initiator name
:rtype: str
"""
generic_utils.assertRemoteRunOK(zaza.model.run_on_unit(
unit,
('cp /etc/iscsi/initiatorname.iscsi /tmp; '
'chmod 644 /tmp/initiatorname.iscsi')))
with tempfile.TemporaryDirectory() as tmpdirname:
tmp_file = '{}/{}'.format(tmpdirname, 'initiatorname.iscsi')
zaza.model.scp_from_unit(
unit,
'/tmp/initiatorname.iscsi',
tmp_file)
with open(tmp_file, 'r') as stream:
contents = stream.readlines()
initiatorname = None
for line in contents:
if line.startswith('InitiatorName'):
initiatorname = line.split('=')[1].rstrip()
return initiatorname
def get_base_ctxt(self):
"""Generate a context for running gwcli commands to create a target.
:returns: Base gateway context
:rtype: Dict
"""
gw_units = zaza.model.get_units('ceph-iscsi')
host_names = generic_utils.get_unit_hostnames(gw_units, fqdn=True)
client_entity_ids = [
u.entity_id for u in zaza.model.get_units('ubuntu')]
ctxt = {
'client_entity_ids': sorted(client_entity_ids),
'gw_iqn': self.GW_IQN,
'chap_creds': 'username={chap_username} password={chap_password}',
'gwcli_gw_dir': '/iscsi-targets/{gw_iqn}/gateways',
'gwcli_hosts_dir': '/iscsi-targets/{gw_iqn}/hosts',
'gwcli_disk_dir': '/disks',
'gwcli_client_dir': '{gwcli_hosts_dir}/{client_initiatorname}',
}
ctxt['gateway_units'] = [
{
'entity_id': u.entity_id,
'ip': u.public_address,
'hostname': host_names[u.entity_id]}
for u in zaza.model.get_units('ceph-iscsi')]
ctxt['gw_ip'] = sorted([g['ip'] for g in ctxt['gateway_units']])[0]
return ctxt
def run_commands(self, unit_name, commands, ctxt):
"""Run commands on unit.
Iterate over each command and apply the context to the command, then
run the command on the supplied unit.
:param unit_name: Name of unit to match
:type unit: str
:param commands: List of commands to run.
:type commands: List[str]
:param ctxt: Context to apply to each command.
:type ctxt: Dict
:raises: AssertionError
"""
for _cmd in commands:
cmd = _cmd.format(**ctxt)
generic_utils.assertRemoteRunOK(zaza.model.run_on_unit(
unit_name,
cmd))
def create_iscsi_target(self, ctxt):
"""Create target on gateway.
:param ctxt: Base gateway context
:type ctxt: Dict
"""
generic_utils.assertActionRanOK(zaza.model.run_action_on_leader(
'ceph-iscsi',
'create-target',
action_params={
'gateway-units': ' '.join([g['entity_id']
for g in ctxt['gateway_units']]),
'iqn': self.GW_IQN,
'rbd-pool-name': ctxt.get('pool_name', ''),
'ec-rbd-metadata-pool': ctxt.get('ec_meta_pool_name', ''),
'image-size': ctxt['img_size'],
'image-name': ctxt['img_name'],
'client-initiatorname': ctxt['client_initiatorname'],
'client-username': ctxt['chap_username'],
'client-password': ctxt['chap_password']
}))
def login_iscsi_target(self, ctxt):
"""Login to the iscsi target on client.
:param ctxt: Base gateway context
:type ctxt: Dict
"""
logging.info("Logging in to iscsi target")
base_op_cmd = ('iscsiadm --mode node --targetname {gw_iqn} '
'--op=update ').format(**ctxt)
setup_cmds = [
'iscsiadm -m discovery -t st -p {gw_ip}',
base_op_cmd + '-n node.session.auth.authmethod -v CHAP',
base_op_cmd + '-n node.session.auth.username -v {chap_username}',
base_op_cmd + '-n node.session.auth.password -v {chap_password}',
'iscsiadm --mode node --targetname {gw_iqn} --login']
self.run_commands(ctxt['client_entity_id'], setup_cmds, ctxt)
def logout_iscsi_targets(self, ctxt):
"""Logout of iscsi target on client.
:param ctxt: Base gateway context
:type ctxt: Dict
"""
logging.info("Logging out of iscsi target")
logout_cmds = [
'iscsiadm --mode node --logoutall=all']
self.run_commands(ctxt['client_entity_id'], logout_cmds, ctxt)
def check_client_device(self, ctxt, init_client=True):
"""Wait for multipath device to appear on client and test access.
:param ctxt: Base gateway context
:type ctxt: Dict
:param init_client: Initialise client if this is the first time it has
been used.
:type init_client: bool
"""
logging.info("Checking multipath device is present.")
device_ctxt = {
'bdevice': '/dev/dm-0',
'mount_point': '/mnt/iscsi',
'test_file': '/mnt/iscsi/test.data'}
ls_bdevice_cmd = 'ls -l {bdevice}'
mkfs_cmd = 'mke2fs {bdevice}'
mkdir_cmd = 'mkdir {mount_point}'
mount_cmd = 'mount {bdevice} {mount_point}'
umount_cmd = 'umount {mount_point}'
check_mounted_cmd = 'mountpoint {mount_point}'
write_cmd = 'truncate -s 1M {test_file}'
check_file = 'ls -l {test_file}'
if init_client:
commands = [
mkfs_cmd,
mkdir_cmd,
mount_cmd,
check_mounted_cmd,
write_cmd,
check_file,
umount_cmd]
else:
commands = [
mount_cmd,
check_mounted_cmd,
check_file,
umount_cmd]
async def check_device_present():
run = await zaza.model.async_run_on_unit(
ctxt['client_entity_id'],
ls_bdevice_cmd.format(bdevice=device_ctxt['bdevice']))
return device_ctxt['bdevice'] in run['stdout']
logging.info("Checking {} is present on {}".format(
device_ctxt['bdevice'],
ctxt['client_entity_id']))
zaza.model.block_until(check_device_present)
logging.info("Checking mounting device and access")
self.run_commands(ctxt['client_entity_id'], commands, device_ctxt)
def create_data_pool(self):
"""Create data pool to back iscsi targets."""
generic_utils.assertActionRanOK(zaza.model.run_action_on_leader(
'ceph-mon',
'create-pool',
action_params={
'name': self.DATA_POOL_NAME}))
def create_ec_data_pool(self):
"""Create data pool to back iscsi targets."""
generic_utils.assertActionRanOK(zaza.model.run_action_on_leader(
'ceph-mon',
'create-erasure-profile',
action_params={
'name': self.EC_PROFILE_NAME,
'coding-chunks': 2,
'data-chunks': 4,
'plugin': 'jerasure'}))
generic_utils.assertActionRanOK(zaza.model.run_action_on_leader(
'ceph-mon',
'create-pool',
action_params={
'name': self.EC_DATA_POOL,
'pool-type': 'erasure-coded',
'allow-ec-overwrites': True,
'erasure-profile-name': self.EC_PROFILE_NAME}))
generic_utils.assertActionRanOK(zaza.model.run_action_on_leader(
'ceph-mon',
'create-pool',
action_params={
'name': self.EC_METADATA_POOL}))
def run_client_checks(self, test_ctxt):
"""Check access to mulipath device.
Write a filesystem to device, mount it and write data. Then unmount
and logout the iscsi target, finally reconnect and remount checking
data is still present.
:param test_ctxt: Test context.
:type test_ctxt: Dict
"""
self.create_iscsi_target(test_ctxt)
self.login_iscsi_target(test_ctxt)
self.check_client_device(test_ctxt, init_client=True)
self.logout_iscsi_targets(test_ctxt)
self.login_iscsi_target(test_ctxt)
self.check_client_device(test_ctxt, init_client=False)
def test_create_and_mount_volume(self):
"""Test creating a target and mounting it on a client."""
self.create_data_pool()
ctxt = self.get_base_ctxt()
client_entity_id = ctxt['client_entity_ids'][0]
ctxt.update({
'client_entity_id': client_entity_id,
'client_initiatorname': self.get_client_initiatorname(
client_entity_id),
'pool_name': self.DATA_POOL_NAME,
'chap_username': 'myiscsiusername1',
'chap_password': 'myiscsipassword1',
'img_size': '1G',
'img_name': 'disk_rep_1'})
self.run_client_checks(ctxt)
def test_create_and_mount_ec_backed_volume(self):
"""Test creating an EC backed target and mounting it on a client."""
self.create_ec_data_pool()
ctxt = self.get_base_ctxt()
client_entity_id = ctxt['client_entity_ids'][1]
ctxt.update({
'client_entity_id': client_entity_id,
'client_initiatorname': self.get_client_initiatorname(
client_entity_id),
'pool_name': self.EC_DATA_POOL,
'ec_meta_pool_name': self.EC_METADATA_POOL,
'chap_username': 'myiscsiusername2',
'chap_password': 'myiscsipassword2',
'img_size': '2G',
'img_name': 'disk_ec_1'})
self.run_client_checks(ctxt)
def test_create_and_mount_volume_default_pool(self):
"""Test creating a target and mounting it on a client."""
self.create_data_pool()
ctxt = self.get_base_ctxt()
client_entity_id = ctxt['client_entity_ids'][2]
ctxt.update({
'client_entity_id': client_entity_id,
'client_initiatorname': self.get_client_initiatorname(
client_entity_id),
'chap_username': 'myiscsiusername3',
'chap_password': 'myiscsipassword3',
'img_size': '3G',
'img_name': 'disk_default_1'})
self.run_client_checks(ctxt)
    def test_pause_resume(self):
        """Test pausing and resuming a unit.

        While paused, the rbd-target-api and rbd-target-gw services are
        expected to be stopped; they must be running again after resume.
        """
        # pgrep_full=True: match against the full command line when checking
        # these services — presumably their names do not appear as plain
        # process names; TODO confirm.
        with self.pause_resume(
                ['rbd-target-api', 'rbd-target-gw'],
                pgrep_full=True):
            logging.info("Testing pause resume")

View File

@@ -0,0 +1,15 @@
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing ceph-mon for cinder-ceph."""

View File

@@ -0,0 +1,200 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ceph-mon Testing for cinder-ceph."""
import logging
import zaza.model
from zaza.openstack.utilities import (
generic as generic_utils,
openstack as openstack_utils,
exceptions as zaza_exceptions
)
import zaza.openstack.charm_tests.test_utils as test_utils
class CinderCephMonTest(test_utils.OpenStackBaseTest):
    """Verify that the ceph mon units are healthy."""
    @classmethod
    def setUpClass(cls):
        """Run class setup for running ceph mon tests with cinder."""
        super().setUpClass()
    # ported from the cinder-ceph Amulet test
    def test_499_ceph_cmds_exit_zero(self):
        """Check that basic ceph status commands exit zero.

        Runs a set of read-only ceph status commands on every ceph-mon
        unit; on releases older than bionic-train the cinder-ceph units
        are included as well.
        """
        logging.info("Checking exit values are 0 on ceph commands.")
        units = zaza.model.get_units("ceph-mon", model_name=self.model_name)
        current_release = openstack_utils.get_os_release()
        bionic_train = openstack_utils.get_os_release('bionic_train')
        if current_release < bionic_train:
            # Older releases: run the same checks on the cinder-ceph units
            # too — presumably the ceph CLI is available there; TODO confirm.
            units.extend(zaza.model.get_units("cinder-ceph",
                                              model_name=self.model_name))
        commands = [
            'sudo ceph health',
            'sudo ceph mds stat',
            'sudo ceph pg stat',
            'sudo ceph osd stat',
            'sudo ceph mon stat',
        ]
        # run_commands raises CephGenericError listing every failed command.
        for unit in units:
            run_commands(unit.name, commands)
    # ported from the cinder-ceph Amulet test
    def test_500_ceph_alternatives_cleanup(self):
        """Check ceph alternatives removed when ceph-mon relation is broken."""
        # Skip this test if release is less than xenial_ocata as in that case
        # cinder HAS a relation with ceph directly and this test would fail
        current_release = openstack_utils.get_os_release()
        xenial_ocata = openstack_utils.get_os_release('xenial_ocata')
        if current_release < xenial_ocata:
            logging.info("Skipping test as release < xenial-ocata")
            return
        units = zaza.model.get_units("cinder-ceph",
                                     model_name=self.model_name)
        # check each unit prior to breaking relation
        for unit in units:
            dir_list = directory_listing(unit.name, "/etc/ceph")
            if 'ceph.conf' in dir_list:
                logging.debug(
                    "/etc/ceph/ceph.conf exists BEFORE relation-broken")
            else:
                raise zaza_exceptions.CephGenericError(
                    "unit: {} - /etc/ceph/ceph.conf does not exist "
                    "BEFORE relation-broken".format(unit.name))
        # remove the relation so that /etc/ceph/ceph.conf is removed
        logging.info("Removing ceph-mon:client <-> cinder-ceph:ceph relation")
        zaza.model.remove_relation(
            "ceph-mon", "ceph-mon:client", "cinder-ceph:ceph")
        # zaza.model.wait_for_agent_status()
        logging.info("Wait till relation is removed...")
        ceph_mon_units = zaza.model.get_units("ceph-mon",
                                              model_name=self.model_name)
        # Block until no ceph-mon unit reports the relation any more.
        conditions = [
            invert_condition(
                does_relation_exist(
                    u.name, "ceph-mon", "cinder-ceph", "ceph",
                    self.model_name))
            for u in ceph_mon_units]
        zaza.model.block_until(*conditions)
        logging.info("Checking each unit after breaking relation...")
        for unit in units:
            dir_list = directory_listing(unit.name, "/etc/ceph")
            if 'ceph.conf' not in dir_list:
                logging.debug(
                    "/etc/ceph/ceph.conf removed AFTER relation-broken")
            else:
                raise zaza_exceptions.CephGenericError(
                    "unit: {} - /etc/ceph/ceph.conf still exists "
                    "AFTER relation-broken".format(unit.name))
        # Restore cinder-ceph and ceph-mon relation to keep tests idempotent
        logging.info("Restoring ceph-mon:client <-> cinder-ceph:ceph relation")
        zaza.model.add_relation(
            "ceph-mon", "ceph-mon:client", "cinder-ceph:ceph")
        # Wait until every ceph-mon unit sees the relation again, then wait
        # for the whole model to settle before the next test runs.
        conditions = [
            does_relation_exist(
                u.name, "ceph-mon", "cinder-ceph", "ceph", self.model_name)
            for u in ceph_mon_units]
        logging.info("Wait till model is idle ...")
        zaza.model.block_until(*conditions)
        zaza.model.block_until_all_units_idle()
        logging.info("... Done.")
def does_relation_exist(unit_name,
                        application_name,
                        remote_application_name,
                        remote_interface_name,
                        model_name):
    """Build an async condition that reports whether a relation exists.

    For use with async blocking helpers such as ``zaza.model.block_until``.

    NOTE(review): ``unit_name`` is not used by the returned closure; the
    relation is looked up on the application itself.

    :param unit_name: the unit (by name) that to check on.
    :type unit_name: str
    :param application_name: Name of application on this side of relation
    :type application_name: str
    :param remote_application_name: the remote application name to check for
    :type remote_application_name: str
    :param remote_interface_name: the remote interface name to check for
    :type remote_interface_name: str
    :param model_name: the model to check on
    :type model_name: str
    :returns: Coroutine function that returns True if the relation was found
    :rtype: Coroutine[[], bool]
    """
    async def _async_does_relation_exist_closure():
        async with zaza.model.run_in_model(model_name) as model:
            # A relation matches when the remote endpoint is
            # "<application>:<interface>".
            spec = "{}:{}".format(
                remote_application_name, remote_interface_name)
            for rel in model.applications[application_name].relations:
                if rel.matches(spec):
                    return True
            return False
    return _async_does_relation_exist_closure
def invert_condition(async_condition):
    """Wrap an async condition so it yields the opposite truth value.

    Useful for turning a "relation exists" style condition into a
    "wait until it no longer exists" condition for blocking helpers.

    :param async_condition: the async callable that is the test
    :type async_condition: Callable[]
    :returns: Coroutine function returning the negation of the callable
    :rtype: Coroutine[[], bool]
    """
    async def _negated_condition():
        result = await async_condition()
        return not result
    return _negated_condition
def run_commands(unit_name, commands):
    """Run each command on the unit and assert it succeeded.

    All commands are attempted even if some fail; failures are collected
    and raised together at the end.

    :param unit_name: the unit to run the commands on
    :type unit_name: str
    :param commands: commands to execute
    :type commands: List[str]
    :raises: zaza_exceptions.CephGenericError if any command failed
    """
    failures = []
    for command in commands:
        try:
            result = zaza.model.run_on_unit(unit_name, command)
            generic_utils.assertRemoteRunOK(result)
        except Exception as err:
            failures.append("unit: {}, command: {}, error: {}"
                            .format(unit_name, command, str(err)))
    if failures:
        raise zaza_exceptions.CephGenericError("\n".join(failures))
def directory_listing(unit_name, directory):
    """Return the names of entries in a directory on a unit.

    :param unit_name: the unit to fetch the directory listing from
    :type unit_name: str
    :param directory: the directory to fetch the listing from
    :type directory: str
    :returns: A listing using "ls -1" on the unit
    :rtype: List[str]
    """
    listing_cmd = "ls -1 {}".format(directory)
    output = zaza.model.run_on_unit(unit_name, listing_cmd)
    return output['Stdout'].splitlines()

View File

@@ -16,6 +16,9 @@
import logging
import unittest
import re
from copy import deepcopy
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.model as zaza_model
@@ -47,3 +50,235 @@ class SecurityTest(unittest.TestCase):
expected_passes,
expected_failures,
expected_to_pass=True)
class OsdService:
    """Lightweight handle for a single ceph-osd systemd service."""
    def __init__(self, id_):
        """Create a handle from the numeric service ID.

        e.g.: id_=1 -> ceph-osd@1

        :param id_: numeric ID of the OSD service instance
        """
        self.id = id_
        self.name = 'ceph-osd@%s' % id_
async def async_wait_for_service_status(unit_name, services, target_status,
                                        model_name=None, timeout=2700):
    """Wait for all services on the unit to be in the desired state.

    Note: This function emulates the
    `zaza.model.async_block_until_service_status` function, but it's using
    `systemctl is-active` command instead of `pidof/pgrep` of the original
    function.

    :param unit_name: Name of unit to run action on
    :type unit_name: str
    :param services: List of services to check
    :type services: List[str]
    :param target_status: State services must be in (stopped or running)
    :type target_status: str
    :param model_name: Name of model to query.
    :type model_name: str
    :param timeout: Time to wait for status to be achieved
    :type timeout: int
    :raises: RuntimeError if target_status is not 'stopped' or 'running'
    """
    # Inner coroutine polled by async_block_until; True only once every
    # listed service reports the desired systemd state.
    async def _check_service():
        services_ok = True
        for service in services:
            # 'systemctl is-active' prints 'active'/'inactive' (among
            # others) on stdout; only those two map to our target states.
            command = r"systemctl is-active '{}'".format(service)
            out = await zaza_model.async_run_on_unit(
                unit_name,
                command,
                model_name=model_name,
                timeout=timeout)
            response = out['Stdout'].strip()
            if target_status == "running" and response == 'active':
                continue
            elif target_status == "stopped" and response == 'inactive':
                continue
            else:
                # Any other state (failed, activating, ...) counts as not
                # yet at the target; stop early and poll again.
                services_ok = False
                break
        return services_ok
    # Validate early so a typo'd state fails fast instead of blocking
    # until the timeout expires.
    accepted_states = ('stopped', 'running')
    if target_status not in accepted_states:
        raise RuntimeError('Invalid target state "{}". Accepted states: '
                           '{}'.format(target_status, accepted_states))
    async with zaza_model.run_in_model(model_name):
        await zaza_model.async_block_until(_check_service, timeout=timeout)
# Synchronous facade over async_wait_for_service_status for use in tests.
wait_for_service = zaza_model.sync_wrapper(async_wait_for_service_status)
class ServiceTest(unittest.TestCase):
    """ceph-osd systemd service tests.

    Exercises the charm's 'start'/'stop' Juju actions and verifies the
    resulting state of the ceph-osd@<id> systemd units on one unit.
    """
    TESTED_UNIT = 'ceph-osd/0'  # This can be any ceph-osd unit in the model
    # Matches systemd unit names like 'ceph-osd@1.service', capturing the id.
    SERVICE_PATTERN = re.compile(r'ceph-osd@(?P<service_id>\d+)\.service')
    def __init__(self, methodName='runTest'):
        """Initialize Test Case."""
        super(ServiceTest, self).__init__(methodName)
        # Lazily-populated cache behind the 'available_services' property.
        self._available_services = None
    @classmethod
    def setUpClass(cls):
        """Run class setup for running ceph service tests."""
        super(ServiceTest, cls).setUpClass()
    def setUp(self):
        """Run test setup."""
        # Skip 'service' action tests on systems without systemd
        result = zaza_model.run_on_unit(self.TESTED_UNIT, 'which systemctl')
        if not result['Stdout']:
            raise unittest.SkipTest("'service' action is not supported on "
                                    "systems without 'systemd'. Skipping "
                                    "tests.")
        # Note(mkalcok): This counter reset is needed because ceph-osd service
        # is limited to 3 restarts per 30 mins which is insufficient
        # when running functional tests for 'service' action. This
        # limitation is defined in /lib/systemd/system/ceph-osd@.service
        # in section [Service] with options 'StartLimitInterval' and
        # 'StartLimitBurst'
        reset_counter = 'systemctl reset-failed'
        zaza_model.run_on_unit(self.TESTED_UNIT, reset_counter)
    def tearDown(self):
        """Start ceph-osd services after each test.

        This ensures that the environment is ready for the next tests.
        """
        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
                                       action_params={'osds': 'all'},
                                       raise_on_failure=True)
    @property
    def available_services(self):
        """Return list of all ceph-osd services present on the TESTED_UNIT."""
        if self._available_services is None:
            self._available_services = self._fetch_osd_services()
        return self._available_services
    def _fetch_osd_services(self):
        """Fetch all ceph-osd services present on the TESTED_UNIT."""
        service_list = []
        service_list_cmd = 'systemctl list-units --full --all ' \
                           '--no-pager -t service'
        result = zaza_model.run_on_unit(self.TESTED_UNIT, service_list_cmd)
        for line in result['Stdout'].split('\n'):
            service_name = self.SERVICE_PATTERN.search(line)
            if service_name:
                service_id = int(service_name.group('service_id'))
                service_list.append(OsdService(service_id))
        return service_list
    def test_start_stop_all_by_keyword(self):
        """Start and Stop all ceph-osd services using keyword 'all'."""
        service_list = [service.name for service in self.available_services]
        logging.info("Running 'service stop=all' action on {} "
                     "unit".format(self.TESTED_UNIT))
        zaza_model.run_action_on_units([self.TESTED_UNIT], 'stop',
                                       action_params={'osds': 'all'})
        wait_for_service(unit_name=self.TESTED_UNIT,
                         services=service_list,
                         target_status='stopped')
        logging.info("Running 'service start=all' action on {} "
                     "unit".format(self.TESTED_UNIT))
        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
                                       action_params={'osds': 'all'})
        wait_for_service(unit_name=self.TESTED_UNIT,
                         services=service_list,
                         target_status='running')
    def test_start_stop_all_by_list(self):
        """Start and Stop all ceph-osd services using explicit list."""
        service_list = [service.name for service in self.available_services]
        service_ids = [str(service.id) for service in self.available_services]
        # The action takes a comma-separated list of OSD ids.
        action_params = ','.join(service_ids)
        logging.info("Running 'service stop={}' action on {} "
                     "unit".format(action_params, self.TESTED_UNIT))
        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'stop',
                                       action_params={'osds': action_params})
        wait_for_service(unit_name=self.TESTED_UNIT,
                         services=service_list,
                         target_status='stopped')
        logging.info("Running 'service start={}' action on {} "
                     "unit".format(action_params, self.TESTED_UNIT))
        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
                                       action_params={'osds': action_params})
        wait_for_service(unit_name=self.TESTED_UNIT,
                         services=service_list,
                         target_status='running')
    def test_stop_specific(self):
        """Stop only specified ceph-osd service."""
        if len(self.available_services) < 2:
            raise unittest.SkipTest('This test can be performed only if '
                                    'there\'s more than one ceph-osd service '
                                    'present on the tested unit')
        # Stop the last service; every other one must keep running.
        should_run = deepcopy(self.available_services)
        to_stop = should_run.pop()
        should_run = [service.name for service in should_run]
        logging.info("Running 'service stop={} on {} "
                     "unit".format(to_stop.id, self.TESTED_UNIT))
        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'stop',
                                       action_params={'osds': to_stop.id})
        wait_for_service(unit_name=self.TESTED_UNIT,
                         services=[to_stop.name, ],
                         target_status='stopped')
        wait_for_service(unit_name=self.TESTED_UNIT,
                         services=should_run,
                         target_status='running')
    def test_start_specific(self):
        """Start only specified ceph-osd service."""
        if len(self.available_services) < 2:
            raise unittest.SkipTest('This test can be performed only if '
                                    'there\'s more than one ceph-osd service '
                                    'present on the tested unit')
        service_names = [service.name for service in self.available_services]
        # Start only the last service; every other one must stay stopped.
        should_stop = deepcopy(self.available_services)
        to_start = should_stop.pop()
        should_stop = [service.name for service in should_stop]
        # Note: can't stop ceph-osd.target as restarting a single OSD will
        # cause this to start all of the OSDs when a single one starts.
        logging.info("Stopping all running ceph-osd services")
        service_stop_cmd = '; '.join(['systemctl stop {}'.format(service)
                                      for service in service_names])
        zaza_model.run_on_unit(self.TESTED_UNIT, service_stop_cmd)
        wait_for_service(unit_name=self.TESTED_UNIT,
                         services=service_names,
                         target_status='stopped')
        logging.info("Running 'service start={} on {} "
                     "unit".format(to_start.id, self.TESTED_UNIT))
        zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start',
                                       action_params={'osds': to_start.id})
        wait_for_service(unit_name=self.TESTED_UNIT,
                         services=[to_start.name, ],
                         target_status='running')
        wait_for_service(unit_name=self.TESTED_UNIT,
                         services=should_stop,
                         target_status='stopped')

View File

@@ -17,13 +17,140 @@ import json
import logging
import re
import cinderclient.exceptions as cinder_exceptions
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.model
import zaza.openstack.utilities.ceph
import zaza.openstack.utilities.openstack as openstack
from zaza.openstack.charm_tests.glance.setup import LTS_IMAGE_NAME
from zaza.openstack.charm_tests.glance.setup import (
LTS_IMAGE_NAME,
CIRROS_IMAGE_NAME)
# Fallback when the 'rbd-mirroring-mode' application config is unset —
# presumably matching the cinder-ceph charm default; TODO confirm.
DEFAULT_CINDER_RBD_MIRRORING_MODE = 'pool'
def get_cinder_rbd_mirroring_mode(cinder_ceph_app_name='cinder-ceph'):
    """Get the RBD mirroring mode for the Cinder Ceph pool.

    Reads the 'rbd-mirroring-mode' option from the application config,
    falling back to DEFAULT_CINDER_RBD_MIRRORING_MODE when unset.

    :param cinder_ceph_app_name: Cinder Ceph Juju application name.
    :type cinder_ceph_app_name: str
    :returns: A string representing the RBD mirroring mode. It can be
              either 'pool' or 'image'.
    :rtype: str
    """
    mode_config = zaza.model.get_application_config(
        cinder_ceph_app_name).get('rbd-mirroring-mode')
    if not mode_config:
        return DEFAULT_CINDER_RBD_MIRRORING_MODE
    return mode_config.get(
        'value', DEFAULT_CINDER_RBD_MIRRORING_MODE).lower()
def get_glance_image(glance):
    """Return the Glance image to be used by the Ceph tests.

    The Cirros image is preferred; when it is not present, fall back to
    the Ubuntu LTS image.

    :param glance: Authenticated glanceclient
    :type glance: glanceclient.Client
    :returns: Glance image object
    :rtype: glanceclient.image
    """
    cirros_images = openstack.get_images_by_name(glance, CIRROS_IMAGE_NAME)
    if cirros_images:
        return cirros_images[0]
    logging.info("Failed to find {} image, falling back to {}".format(
        CIRROS_IMAGE_NAME,
        LTS_IMAGE_NAME))
    return openstack.get_images_by_name(glance, LTS_IMAGE_NAME)[0]
def setup_cinder_repl_volume_type(cinder, type_name='repl',
                                  backend_name='cinder-ceph'):
    """Ensure the replicated Cinder volume type exists and return it.

    If a type with the given name is missing it is created; in either
    case its extra specs are (re)set to pin it to the replication-enabled
    backend.

    :param cinder: Authenticated cinderclient
    :type cinder: cinder.Client
    :param type_name: Cinder volume type name
    :type type_name: str
    :param backend_name: Cinder volume backend name with replication enabled.
    :type backend_name: str
    :returns: Cinder volume type object
    :rtype: cinderclient.VolumeType
    """
    try:
        volume_type = cinder.volume_types.find(name=type_name)
    except cinder_exceptions.NotFound:
        volume_type = cinder.volume_types.create(type_name)
    # Extra specs steering the scheduler to the replicated backend.
    volume_type.set_keys(metadata={
        'volume_backend_name': backend_name,
        'replication_enabled': '<is> True',
    })
    return volume_type
# TODO: This function should be incorporated into
# 'zaza.openstack.utilities.openstack.create_volume' helper, once the below
# flakiness comments are addressed.
def create_cinder_volume(cinder, name='zaza', image_id=None, type_id=None):
    """Create a new Cinder volume.

    :param cinder: Authenticated cinderclient.
    :type cinder: cinder.Client
    :param name: Volume name.
    :type name: str
    :param image_id: Glance image id, if the volume is created from image.
    :type image_id: str
    :param type_id: Cinder Volume type id, if the volume needs to use an
                    explicit volume type.
    :type type_id: str
    :returns: Cinder volume (or None if every attempt failed)
    :rtype: :class:`Volume`.
    """
    # NOTE(fnordahl): for some reason create volume from image often fails
    # when run just after deployment is finished. We should figure out
    # why, resolve the underlying issue and then remove this.
    #
    # We do not use tenacity here as it will interfere with tenacity used
    # in ``resource_reaches_status``
    def _create_with_retries(volume_params, attempts=20):
        while attempts >= 1:
            volume = cinder.volumes.create(**volume_params)
            try:
                # Note(coreycb): stop_after_attempt is increased because using
                # juju storage for ceph-osd backed by cinder on undercloud
                # takes longer than the prior method of directory-backed OSD
                # devices.
                openstack.resource_reaches_status(
                    cinder.volumes, volume.id, msg='volume',
                    stop_after_attempt=20)
                return volume
            except AssertionError:
                logging.info('retrying')
                volume.delete()
                attempts -= 1
        # All attempts exhausted; give up silently as before.
        return None
    volume_params = {
        'size': 8,
        'name': name,
    }
    if image_id:
        volume_params['imageRef'] = image_id
    if type_id:
        volume_params['volume_type'] = type_id
    return _create_with_retries(volume_params)
class CephRBDMirrorBase(test_utils.OpenStackBaseTest):
@@ -33,20 +160,26 @@ class CephRBDMirrorBase(test_utils.OpenStackBaseTest):
def setUpClass(cls):
"""Run setup for ``ceph-rbd-mirror`` tests."""
super().setUpClass()
cls.cinder_ceph_app_name = 'cinder-ceph'
cls.test_cinder_volume_name = 'test-cinder-ceph-volume'
# get ready for multi-model Zaza
cls.site_a_model = cls.site_b_model = zaza.model.get_juju_model()
cls.site_b_app_suffix = '-b'
def run_status_action(self, application_name=None, model_name=None):
def run_status_action(self, application_name=None, model_name=None,
pools=[]):
"""Run status action, decode and return response."""
action_params = {
'verbose': True,
'format': 'json',
}
if len(pools) > 0:
action_params['pools'] = ','.join(pools)
result = zaza.model.run_action_on_leader(
application_name or self.application_name,
'status',
model_name=model_name,
action_params={
'verbose': True,
'format': 'json',
})
action_params=action_params)
return json.loads(result.results['output'])
def get_pools(self):
@@ -66,10 +199,26 @@ class CephRBDMirrorBase(test_utils.OpenStackBaseTest):
model_name=self.site_b_model)
return sorted(site_a_pools.keys()), sorted(site_b_pools.keys())
def get_failover_pools(self):
"""Get the failover Ceph pools' names, from both sites.
If the Cinder RBD mirroring mode is 'image', the 'cinder-ceph' pool
needs to be excluded, since Cinder orchestrates the failover then.
:returns: Tuple with site-a pools and site-b pools.
:rtype: Tuple[List[str], List[str]]
"""
site_a_pools, site_b_pools = self.get_pools()
if get_cinder_rbd_mirroring_mode(self.cinder_ceph_app_name) == 'image':
site_a_pools.remove(self.cinder_ceph_app_name)
site_b_pools.remove(self.cinder_ceph_app_name)
return site_a_pools, site_b_pools
def wait_for_mirror_state(self, state, application_name=None,
model_name=None,
check_entries_behind_master=False,
require_images_in=[]):
require_images_in=[],
pools=[]):
"""Wait until all images reach requested state.
This function runs the ``status`` action and examines the data it
@@ -88,6 +237,9 @@ class CephRBDMirrorBase(test_utils.OpenStackBaseTest):
:type check_entries_behind_master: bool
:param require_images_in: List of pools to require images in
:type require_images_in: list of str
:param pools: List of pools to run status on. If this is empty, the
status action will run on all the pools.
:type pools: list of str
:returns: True on success, never returns on failure
"""
rep = re.compile(r'.*entries_behind_master=(\d+)')
@@ -95,7 +247,8 @@ class CephRBDMirrorBase(test_utils.OpenStackBaseTest):
try:
# encapsulate in try except to work around LP: #1820976
pool_status = self.run_status_action(
application_name=application_name, model_name=model_name)
application_name=application_name, model_name=model_name,
pools=pools)
except KeyError:
continue
for pool, status in pool_status.items():
@@ -122,6 +275,41 @@ class CephRBDMirrorBase(test_utils.OpenStackBaseTest):
# all images with state has expected state
return True
def setup_test_cinder_volume(self):
"""Set up the test Cinder volume into the Ceph RBD mirror environment.
If the volume already exists, then it's returned.
Also, if the Cinder RBD mirroring mode is 'image', the volume will
use an explicit volume type with the appropriate replication flags.
Otherwise, it is just a simple Cinder volume using the default backend.
:returns: Cinder volume
:rtype: :class:`Volume`.
"""
session = openstack.get_overcloud_keystone_session()
cinder = openstack.get_cinder_session_client(session, version=3)
try:
return cinder.volumes.find(name=self.test_cinder_volume_name)
except cinder_exceptions.NotFound:
logging.info("Test Cinder volume doesn't exist. Creating it")
glance = openstack.get_glance_session_client(session)
image = get_glance_image(glance)
kwargs = {
'cinder': cinder,
'name': self.test_cinder_volume_name,
'image_id': image.id,
}
if get_cinder_rbd_mirroring_mode(self.cinder_ceph_app_name) == 'image':
volume_type = setup_cinder_repl_volume_type(
cinder,
backend_name=self.cinder_ceph_app_name)
kwargs['type_id'] = volume_type.id
return create_cinder_volume(**kwargs)
class CephRBDMirrorTest(CephRBDMirrorBase):
"""Encapsulate ``ceph-rbd-mirror`` tests."""
@@ -193,32 +381,7 @@ class CephRBDMirrorTest(CephRBDMirrorBase):
site B and subsequently comparing the contents we get a full end to end
test.
"""
session = openstack.get_overcloud_keystone_session()
glance = openstack.get_glance_session_client(session)
cinder = openstack.get_cinder_session_client(session)
image = next(glance.images.list(name=LTS_IMAGE_NAME))
# NOTE(fnordahl): for some reason create volume from image often fails
# when run just after deployment is finished. We should figure out
# why, resolve the underlying issue and then remove this.
#
# We do not use tenacity here as it will interfere with tenacity used
# in ``resource_reaches_status``
def create_volume_from_image(cinder, image, retry=5):
if retry < 1:
return
volume = cinder.volumes.create(8, name='zaza', imageRef=image.id)
try:
openstack.resource_reaches_status(
cinder.volumes, volume.id, msg='volume')
return volume
except AssertionError:
logging.info('retrying')
volume.delete()
return create_volume_from_image(cinder, image, retry=retry - 1)
volume = create_volume_from_image(cinder, image)
volume = self.setup_test_cinder_volume()
site_a_hash = zaza.openstack.utilities.ceph.get_rbd_hash(
zaza.model.get_lead_unit_name('ceph-mon',
model_name=self.site_a_model),
@@ -230,6 +393,8 @@ class CephRBDMirrorTest(CephRBDMirrorBase):
check_entries_behind_master=True,
application_name=self.application_name + self.site_b_app_suffix,
model_name=self.site_b_model)
logging.info('Checking the Ceph RBD hashes of the primary and '
'the secondary Ceph images')
site_b_hash = zaza.openstack.utilities.ceph.get_rbd_hash(
zaza.model.get_lead_unit_name('ceph-mon' + self.site_b_app_suffix,
model_name=self.site_b_model),
@@ -244,102 +409,399 @@ class CephRBDMirrorTest(CephRBDMirrorBase):
class CephRBDMirrorControlledFailoverTest(CephRBDMirrorBase):
"""Encapsulate ``ceph-rbd-mirror`` controlled failover tests."""
def test_fail_over_fall_back(self):
"""Validate controlled fail over and fall back."""
site_a_pools, site_b_pools = self.get_pools()
def execute_failover_juju_actions(self,
primary_site_app_name,
primary_site_model,
primary_site_pools,
secondary_site_app_name,
secondary_site_model,
secondary_site_pools):
"""Execute the failover Juju actions.
The failover / failback via Juju actions shares the same workflow. The
failback is just a failover with sites in reversed order.
This function encapsulates the tasks to failover a primary site to
a secondary site:
1. Demote primary site
2. Validation of the primary site demotion
3. Promote secondary site
4. Validation of the secondary site promotion
:param primary_site_app_name: Primary site Ceph RBD mirror app name.
:type primary_site_app_name: str
:param primary_site_model: Primary site Juju model name.
:type primary_site_model: str
:param primary_site_pools: Primary site pools.
:type primary_site_pools: List[str]
:param secondary_site_app_name: Secondary site Ceph RBD mirror
app name.
:type secondary_site_app_name: str
:param secondary_site_model: Secondary site Juju model name.
:type secondary_site_model: str
:param secondary_site_pools: Secondary site pools.
:type secondary_site_pools: List[str]
"""
# Check if primary and secondary pools sizes are the same.
self.assertEqual(len(primary_site_pools), len(secondary_site_pools))
# Run the 'demote' Juju action against the primary site pools.
logging.info('Demoting {} from model {}.'.format(
primary_site_app_name, primary_site_model))
result = zaza.model.run_action_on_leader(
'ceph-rbd-mirror',
primary_site_app_name,
'demote',
model_name=self.site_a_model,
action_params={})
model_name=primary_site_model,
action_params={
'pools': ','.join(primary_site_pools)
})
logging.info(result.results)
self.assertEqual(int(result.results['Code']), 0)
# Validate that the demoted pools count matches the total primary site
# pools count.
n_pools_demoted = len(result.results['output'].split('\n'))
self.assertEqual(len(site_a_pools), n_pools_demoted)
self.wait_for_mirror_state('up+unknown', model_name=self.site_a_model)
self.assertEqual(len(primary_site_pools), n_pools_demoted)
# At this point, both primary and secondary sites are demoted. Validate
# that the Ceph images, from both sites, report 'up+unknown', since
# there isn't a primary site at the moment.
logging.info('Waiting until {} is demoted.'.format(
primary_site_app_name))
self.wait_for_mirror_state(
'up+unknown',
application_name=self.application_name + self.site_b_app_suffix,
model_name=self.site_b_model)
application_name=primary_site_app_name,
model_name=primary_site_model,
pools=primary_site_pools)
self.wait_for_mirror_state(
'up+unknown',
application_name=secondary_site_app_name,
model_name=secondary_site_model,
pools=secondary_site_pools)
# Run the 'promote' Juju against the secondary site.
logging.info('Promoting {} from model {}.'.format(
secondary_site_app_name, secondary_site_model))
result = zaza.model.run_action_on_leader(
'ceph-rbd-mirror' + self.site_b_app_suffix,
secondary_site_app_name,
'promote',
model_name=self.site_b_model,
action_params={})
model_name=secondary_site_model,
action_params={
'pools': ','.join(secondary_site_pools)
})
logging.info(result.results)
self.assertEqual(int(result.results['Code']), 0)
# Validate that the promoted pools count matches the total secondary
# site pools count.
n_pools_promoted = len(result.results['output'].split('\n'))
self.assertEqual(len(site_b_pools), n_pools_promoted)
self.assertEqual(len(secondary_site_pools), n_pools_promoted)
# Validate that the Ceph images from the newly promoted site
# report 'up+stopped' state (which is reported by primary Ceph images).
logging.info('Waiting until {} is promoted.'.format(
secondary_site_app_name))
self.wait_for_mirror_state(
'up+stopped',
application_name=secondary_site_app_name,
model_name=secondary_site_model,
pools=secondary_site_pools)
# Validate that the Ceph images from site-a report 'up+replaying'
# (which is reported by secondary Ceph images).
self.wait_for_mirror_state(
'up+replaying',
model_name=self.site_a_model)
check_entries_behind_master=True,
application_name=primary_site_app_name,
model_name=primary_site_model,
pools=primary_site_pools)
def test_100_cinder_failover(self):
"""Validate controlled failover via the Cinder API.
This test only makes sense if Cinder RBD mirroring mode is 'image'.
It will return early, if this is not the case.
"""
cinder_rbd_mirroring_mode = get_cinder_rbd_mirroring_mode(
self.cinder_ceph_app_name)
if cinder_rbd_mirroring_mode != 'image':
logging.warning(
"Skipping 'test_100_cinder_failover' since Cinder RBD "
"mirroring mode is {}.".format(cinder_rbd_mirroring_mode))
return
session = openstack.get_overcloud_keystone_session()
cinder = openstack.get_cinder_session_client(session, version=3)
# Check if the Cinder volume host is available with replication
# enabled.
host = 'cinder@{}'.format(self.cinder_ceph_app_name)
svc = cinder.services.list(host=host, binary='cinder-volume')[0]
self.assertEqual(svc.replication_status, 'enabled')
self.assertEqual(svc.status, 'enabled')
# Setup the test Cinder volume
volume = self.setup_test_cinder_volume()
# Check if the volume is properly mirrored
self.wait_for_mirror_state(
'up+stopped',
'up+replaying',
check_entries_behind_master=True,
application_name=self.application_name + self.site_b_app_suffix,
model_name=self.site_b_model)
result = zaza.model.run_action_on_leader(
'ceph-rbd-mirror' + self.site_b_app_suffix,
'demote',
model_name=self.site_b_model,
action_params={
})
logging.info(result.results)
n_pools_demoted = len(result.results['output'].split('\n'))
self.assertEqual(len(site_a_pools), n_pools_demoted)
self.wait_for_mirror_state(
'up+unknown',
model_name=self.site_a_model)
self.wait_for_mirror_state(
'up+unknown',
application_name=self.application_name + self.site_b_app_suffix,
model_name=self.site_b_model)
pools=[self.cinder_ceph_app_name])
# Execute the Cinder volume failover
openstack.failover_cinder_volume_host(
cinder=cinder,
backend_name=self.cinder_ceph_app_name,
target_backend_id='ceph',
target_status='disabled',
target_replication_status='failed-over')
# Check if the test volume is still available after failover
self.assertEqual(cinder.volumes.get(volume.id).status, 'available')
def test_101_cinder_failback(self):
    """Validate controlled failback via the Cinder API.

    This test only makes sense if Cinder RBD mirroring mode is 'image'.
    It will return early, if this is not the case.

    The test needs to be executed when the Cinder volume host is already
    failed-over with the test volume on it.
    """
    mirroring_mode = get_cinder_rbd_mirroring_mode(
        self.cinder_ceph_app_name)
    if mirroring_mode != 'image':
        # Nothing to validate unless per-image mirroring is configured.
        logging.warning(
            "Skipping 'test_101_cinder_failback' since Cinder RBD "
            "mirroring mode is {}.".format(mirroring_mode))
        return

    keystone_session = openstack.get_overcloud_keystone_session()
    cinder_client = openstack.get_cinder_session_client(
        keystone_session, version=3)

    # A failback only makes sense when the Cinder volume host is already
    # in the failed-over state (i.e. the failover test ran before this).
    volume_host = 'cinder@{}'.format(self.cinder_ceph_app_name)
    service = cinder_client.services.list(
        host=volume_host, binary='cinder-volume')[0]
    self.assertEqual(service.replication_status, 'failed-over')
    self.assertEqual(service.status, 'disabled')

    # 'cinder.volumes.find' raises 404 if the test volume is missing,
    # aborting the test early with a clear error.
    volume = cinder_client.volumes.find(name=self.test_cinder_volume_name)

    # Fail back to the default backend.
    openstack.failover_cinder_volume_host(
        cinder=cinder_client,
        backend_name=self.cinder_ceph_app_name,
        target_backend_id='default',
        target_status='enabled',
        target_replication_status='enabled')

    # The test volume must survive the failback.
    self.assertEqual(cinder_client.volumes.get(volume.id).status,
                     'available')
def test_200_juju_failover(self):
    """Validate controlled failover via Juju actions."""
    # Determine which pools take part in the failover on each site.
    pools_site_a, pools_site_b = self.get_failover_pools()
    app_site_b = self.application_name + self.site_b_app_suffix
    # Demote site-a and promote site-b via the charm's Juju actions.
    self.execute_failover_juju_actions(
        primary_site_app_name=self.application_name,
        primary_site_model=self.site_a_model,
        primary_site_pools=pools_site_a,
        secondary_site_app_name=app_site_b,
        secondary_site_model=self.site_b_model,
        secondary_site_pools=pools_site_b)
def test_201_juju_failback(self):
    """Validate controlled failback via Juju actions."""
    # A failback is simply a failover with the two sites swapped, so the
    # same helper is reused with site-b acting as the primary.
    pools_site_a, pools_site_b = self.get_failover_pools()
    app_site_b = self.application_name + self.site_b_app_suffix
    self.execute_failover_juju_actions(
        primary_site_app_name=app_site_b,
        primary_site_model=self.site_b_model,
        primary_site_pools=pools_site_b,
        secondary_site_app_name=self.application_name,
        secondary_site_model=self.site_a_model,
        secondary_site_pools=pools_site_a)
def test_203_juju_resync(self):
    """Validate the 'resync-pools' Juju action.

    The 'resync-pools' Juju action is meant to flag Ceph images from the
    secondary site to re-sync against the Ceph images from the primary
    site.

    This use case is useful when the Ceph secondary images are out of sync.
    """
    # NOTE(review): this span appears to contain interleaved old/new lines
    # from a diff/merge render and is not valid Python as written (see the
    # duplicated keyword arguments and the misplaced 'promote' block below).
    # Reconcile against the upstream commit before relying on this method.
    # Get the Ceph pools needed to failback
    _, site_b_pools = self.get_failover_pools()
    # Run the 'resync-pools' Juju action against the pools from site-b.
    # This will make sure that the Ceph images from site-b are properly
    # synced with the primary images from site-a.
    site_b_app_name = self.application_name + self.site_b_app_suffix
    logging.info('Re-syncing {} from model {}'.format(
        site_b_app_name, self.site_b_model))
    # NOTE(review): the following 'promote' action on site-a looks like a
    # leftover from the failback flow, not part of a resync — TODO confirm.
    result = zaza.model.run_action_on_leader(
        'ceph-rbd-mirror',
        'promote',
        model_name=self.site_a_model,
        action_params={
        })
    logging.info(result.results)
    n_pools_promoted = len(result.results['output'].split('\n'))
    self.assertEqual(len(site_b_pools), n_pools_promoted)
    self.wait_for_mirror_state(
        'up+stopped',
        model_name=self.site_a_model)
    # NOTE(review): duplicated first positional argument below — both the
    # old ("'ceph-rbd-mirror' + suffix") and new ("site_b_app_name") diff
    # lines are present; only one should remain.
    result = zaza.model.run_action_on_leader(
        'ceph-rbd-mirror' + self.site_b_app_suffix,
        site_b_app_name,
        'resync-pools',
        model_name=self.site_b_model,
        action_params={
            'pools': ','.join(site_b_pools),
            'i-really-mean-it': True,
        })
    logging.info(result.results)
    self.assertEqual(int(result.results['Code']), 0)
    # Validate that the Ceph images from site-b report 'up+replaying'
    # (which is reported by secondary Ceph images). And check that images
    # exist in Cinder and Glance pools.
    # NOTE(review): 'application_name' and 'require_images_in' are each
    # passed twice (old + new diff lines) — SyntaxError; deduplicate.
    self.wait_for_mirror_state(
        'up+replaying',
        application_name=self.application_name + self.site_b_app_suffix,
        check_entries_behind_master=True,
        application_name=site_b_app_name,
        model_name=self.site_b_model,
        require_images_in=['cinder-ceph', 'glance'])
        require_images_in=[self.cinder_ceph_app_name, 'glance'],
        pools=site_b_pools)
class CephRBDMirrorDisasterFailoverTest(CephRBDMirrorBase):
"""Encapsulate ``ceph-rbd-mirror`` destructive tests."""
def test_kill_site_a_fail_over(self):
"""Validate fail over after uncontrolled shutdown of primary."""
for application in 'ceph-rbd-mirror', 'ceph-mon', 'ceph-osd':
def apply_cinder_ceph_workaround(self):
    """Set minimal timeouts / retries to the Cinder Ceph backend.

    This is needed because the failover via Cinder API will try to do a
    demotion of the site-a. However, when site-a is down, and with the
    default timeouts / retries, the operation takes an unreasonably amount
    of time (or sometimes it never finishes).
    """
    # These options must land in the Cinder Ceph backend section of the
    # main Cinder config file. There is currently no Juju config knob for
    # them, and they are not appropriate for production anyway: they exist
    # only so the Ceph failover via the Cinder API completes quickly, and
    # should be reverted afterwards.
    configs = {
        'rados_connect_timeout': '1',
        'rados_connection_retries': '1',
        'rados_connection_interval': '0',
        'replication_connect_timeout': '1',
    }
    # One-liner Python script that rewrites cinder.conf in place; it is
    # executed on the leader unit via 'juju run'.
    update_cinder_conf_script = (
        "import configparser; "
        "config = configparser.ConfigParser(); "
        "config.read('/etc/cinder/cinder.conf'); "
        "{}"
        "f = open('/etc/cinder/cinder.conf', 'w'); "
        "config.write(f); "
        "f.close()")
    set_cmd = ''.join(
        "config.set('{0}', '{1}', '{2}'); ".format(
            self.cinder_ceph_app_name, key, value)
        for key, value in configs.items())
    script = update_cinder_conf_script.format(set_cmd)
    # Apply the workaround and restart cinder-volume to pick it up.
    zaza.model.run_on_leader(
        self.cinder_ceph_app_name,
        'python3 -c "{}"; systemctl restart cinder-volume'.format(script))
def kill_primary_site(self):
    """Simulate an unexpected primary site shutdown."""
    logging.info('Killing the Ceph primary site')
    # Force-remove every Ceph application (and its machines) in site-a so
    # the site disappears without any clean shutdown sequence.
    for app in ('ceph-rbd-mirror', 'ceph-mon', 'ceph-osd'):
        zaza.model.remove_application(
            app,
            model_name=self.site_a_model,
            forcefully_remove_machines=True)
def test_100_forced_juju_failover(self):
    """Validate Ceph failover via Juju when the primary site is down.

    * Kill the primary site
    * Execute the forced failover via Juju actions
    """
    # Get the site-b Ceph pools that need to be promoted
    _, site_b_pools = self.get_failover_pools()
    site_b_app_name = self.application_name + self.site_b_app_suffix
    # Simulate primary site unexpected shutdown
    self.kill_primary_site()
    # Try and promote the site-b to primary.
    # NOTE(review): duplicated first positional argument below — both the
    # old ("'ceph-rbd-mirror' + suffix") and new ("site_b_app_name") diff
    # lines are present in this rendered span; only one should remain.
    result = zaza.model.run_action_on_leader(
        'ceph-rbd-mirror' + self.site_b_app_suffix,
        site_b_app_name,
        'promote',
        model_name=self.site_b_model,
        action_params={
            'pools': ','.join(site_b_pools),
        })
    self.assertEqual(int(result.results['Code']), 0)
    # The site-b 'promote' Juju action is expected to fail, because the
    # primary site is down.
    self.assertEqual(result.status, 'failed')
    # Retry to promote site-b using the 'force' Juju action parameter.
    # NOTE(review): same duplicated-argument corruption here.
    result = zaza.model.run_action_on_leader(
        'ceph-rbd-mirror' + self.site_b_app_suffix,
        site_b_app_name,
        'promote',
        model_name=self.site_b_model,
        action_params={
            'force': True,
            'pools': ','.join(site_b_pools),
        })
    self.assertEqual(int(result.results['Code']), 0)
    # Validate successful Juju action execution
    self.assertEqual(result.status, 'completed')
def test_200_forced_cinder_failover(self):
    """Validate Ceph failover via Cinder when the primary site is down.

    This test only makes sense if Cinder RBD mirroring mode is 'image'.
    It will return early, if this is not the case.

    This assumes that the primary site is already killed.
    """
    mirroring_mode = get_cinder_rbd_mirroring_mode(
        self.cinder_ceph_app_name)
    if mirroring_mode != 'image':
        logging.warning(
            "Skipping 'test_200_cinder_failover_without_primary_site' "
            "since Cinder RBD mirroring mode is {}.".format(
                mirroring_mode))
        return

    # With site-a gone, the demotion step would hang on the default
    # timeouts, so shrink the Ceph backend timeouts / retries first.
    self.apply_cinder_ceph_workaround()

    keystone_session = openstack.get_overcloud_keystone_session()
    cinder_client = openstack.get_cinder_session_client(
        keystone_session, version=3)
    openstack.failover_cinder_volume_host(
        cinder=cinder_client,
        backend_name=self.cinder_ceph_app_name,
        target_backend_id='ceph',
        target_status='disabled',
        target_replication_status='failed-over')

    # Every existing volume must survive the forced failover.
    for volume in cinder_client.volumes.list():
        self.assertEqual(volume.status, 'available')

View File

@@ -14,7 +14,23 @@
"""Setup for ceph-osd deployments."""
import logging
import zaza.model
def basic_setup():
    """Run basic setup for ceph-osd.

    No setup is currently required; this placeholder keeps a stable entry
    point for the configure step referenced from tests.yaml.
    """
def ceph_ready():
    """Wait for ceph to be ready.

    Wait for ceph to be ready. This is useful if the target_deploy_status in
    the tests.yaml is expecting ceph to be in a blocked state. After ceph
    has been unblocked the deploy may need to wait for ceph to be ready.

    :returns: None
    """
    logging.info("Waiting for ceph units to settle")
    # First wait for every application to reach its expected workload
    # status, then wait for all units to stop executing hooks.
    zaza.model.wait_for_application_states()
    zaza.model.block_until_all_units_idle()
    logging.info("Ceph units settled")

View File

@@ -15,11 +15,13 @@
"""Ceph Testing."""
import unittest
import json
import logging
from os import (
listdir,
path
)
import requests
import tempfile
import tenacity
@@ -31,7 +33,7 @@ import zaza.model as zaza_model
import zaza.openstack.utilities.ceph as zaza_ceph
import zaza.openstack.utilities.exceptions as zaza_exceptions
import zaza.openstack.utilities.generic as zaza_utils
import zaza.openstack.utilities.juju as zaza_juju
import zaza.utilities.juju as juju_utils
import zaza.openstack.utilities.openstack as zaza_openstack
@@ -56,7 +58,7 @@ class CephLowLevelTest(test_utils.OpenStackBaseTest):
}
ceph_osd_processes = {
'ceph-osd': [2, 3]
'ceph-osd': [1, 2, 3]
}
# Units with process names and PID quantities expected
@@ -95,6 +97,16 @@ class CephLowLevelTest(test_utils.OpenStackBaseTest):
target_status='running'
)
@test_utils.skipUntilVersion('ceph-mon', 'ceph', '14.2.0')
def test_pg_tuning(self):
    """Verify that auto PG tuning is enabled for Nautilus+."""
    unit_name = 'ceph-mon/0'
    cmd = "ceph osd pool autoscale-status --format=json"
    result = zaza_model.run_on_unit(unit_name, cmd)
    self.assertEqual(result['Code'], '0')
    # Every pool must report the PG autoscaler as enabled.
    autoscale_status = json.loads(result['Stdout'])
    for pool in autoscale_status:
        self.assertEqual(pool['pg_autoscale_mode'], 'on')
class CephRelationTest(test_utils.OpenStackBaseTest):
"""Ceph's relations test class."""
@@ -112,7 +124,7 @@ class CephRelationTest(test_utils.OpenStackBaseTest):
relation_name = 'osd'
remote_unit = zaza_model.get_unit_from_name(remote_unit_name)
remote_ip = remote_unit.public_address
relation = zaza_juju.get_relation_from_unit(
relation = juju_utils.get_relation_from_unit(
unit_name,
remote_unit_name,
relation_name
@@ -138,11 +150,10 @@ class CephRelationTest(test_utils.OpenStackBaseTest):
fsid = result.get('Stdout').strip()
expected = {
'private-address': remote_ip,
'auth': 'none',
'ceph-public-address': remote_ip,
'fsid': fsid,
}
relation = zaza_juju.get_relation_from_unit(
relation = juju_utils.get_relation_from_unit(
unit_name,
remote_unit_name,
relation_name
@@ -360,6 +371,19 @@ class CephTest(test_utils.OpenStackBaseTest):
As the ephemeral device will have data on it we can use it to validate
that these checks work as intended.
"""
current_release = zaza_openstack.get_os_release()
focal_ussuri = zaza_openstack.get_os_release('focal_ussuri')
if current_release >= focal_ussuri:
# NOTE(ajkavanagh) - focal (on ServerStack) is broken for /dev/vdb
# and so this test can't pass: LP#1842751 discusses the issue, but
# basically the snapd daemon along with lxcfs results in /dev/vdb
# being mounted in the lxcfs process namespace. If the charm
# 'tries' to umount it, it can (as root), but the mount is still
# 'held' by lxcfs and thus nothing else can be done with it. This
# is only a problem in serverstack with images with a default
# /dev/vdb ephemeral
logging.warn("Skipping pristine disk test for focal and higher")
return
logging.info('Checking behaviour when non-pristine disks appear...')
logging.info('Configuring ephemeral-unmount...')
alternate_conf = {
@@ -408,9 +432,14 @@ class CephTest(test_utils.OpenStackBaseTest):
set_default = {
'ephemeral-unmount': '',
'osd-devices': '/dev/vdb /srv/ceph',
'osd-devices': '/dev/vdb',
}
current_release = zaza_openstack.get_os_release()
bionic_train = zaza_openstack.get_os_release('bionic_train')
if current_release < bionic_train:
set_default['osd-devices'] = '/dev/vdb /srv/ceph'
logging.info('Restoring to default configuration...')
zaza_model.set_application_config(juju_service, set_default)
@@ -515,7 +544,7 @@ class CephRGWTest(test_utils.OpenStackBaseTest):
@classmethod
def setUpClass(cls):
"""Run class setup for running ceph low level tests."""
super(CephRGWTest, cls).setUpClass()
super(CephRGWTest, cls).setUpClass(application_name='ceph-radosgw')
@property
def expected_apps(self):
@@ -577,6 +606,12 @@ class CephRGWTest(test_utils.OpenStackBaseTest):
target_status='running'
)
# When testing with TLS there is a chance the deployment will appear done
# and idle prior to ceph-radosgw and Keystone have updated the service
# catalog. Retry the test in this circumstance.
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=10, max=300),
reraise=True, stop=tenacity.stop_after_attempt(10),
retry=tenacity.retry_if_exception_type(IOError))
def test_object_storage(self):
"""Verify object storage API.
@@ -587,10 +622,13 @@ class CephRGWTest(test_utils.OpenStackBaseTest):
'multisite configuration')
logging.info('Checking Swift REST API')
keystone_session = zaza_openstack.get_overcloud_keystone_session()
region_name = 'RegionOne'
region_name = zaza_model.get_application_config(
self.application_name,
model_name=self.model_name)['region']['value']
swift_client = zaza_openstack.get_swift_session_client(
keystone_session,
region_name
region_name,
cacert=self.cacert,
)
_container = 'demo-container'
_test_data = 'Test data from Zaza'
@@ -614,7 +652,8 @@ class CephRGWTest(test_utils.OpenStackBaseTest):
keystone_session = zaza_openstack.get_overcloud_keystone_session()
source_client = zaza_openstack.get_swift_session_client(
keystone_session,
region_name='east-1'
region_name='east-1',
cacert=self.cacert,
)
_container = 'demo-container'
_test_data = 'Test data from Zaza'
@@ -628,7 +667,8 @@ class CephRGWTest(test_utils.OpenStackBaseTest):
target_client = zaza_openstack.get_swift_session_client(
keystone_session,
region_name='east-1'
region_name='east-1',
cacert=self.cacert,
)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
@@ -660,11 +700,13 @@ class CephRGWTest(test_utils.OpenStackBaseTest):
keystone_session = zaza_openstack.get_overcloud_keystone_session()
source_client = zaza_openstack.get_swift_session_client(
keystone_session,
region_name='east-1'
region_name='east-1',
cacert=self.cacert,
)
target_client = zaza_openstack.get_swift_session_client(
keystone_session,
region_name='west-1'
region_name='west-1',
cacert=self.cacert,
)
zaza_model.run_action_on_leader(
'slave-ceph-radosgw',
@@ -706,7 +748,269 @@ class CephProxyTest(unittest.TestCase):
def test_ceph_health(self):
    """Make sure ceph-proxy can communicate with ceph."""
    logging.info('Wait for idle/ready status...')
    zaza_model.wait_for_application_states()
    # 'ceph health' exiting 0 proves the proxy can reach the cluster.
    health_result = zaza_model.run_on_leader("ceph-proxy", "sudo ceph health")
    self.assertEqual(health_result["Code"], "0")
def test_cinder_ceph_restrict_pool_setup(self):
    """Make sure cinder-ceph restrict pool was created successfully."""
    logging.info('Wait for idle/ready status...')
    zaza_model.wait_for_application_states()

    pools = zaza_ceph.get_ceph_pools('ceph-mon/0')
    if 'cinder-ceph' not in pools:
        msg = 'cinder-ceph pool was not found upon querying ceph-mon/0'
        raise zaza_exceptions.CephPoolNotFound(msg)

    # Checking for cinder-ceph specific permissions makes
    # the test more rugged when we add additional relations
    # to ceph for other applications (such as glance and nova).
    expected_permissions = [
        "allow rwx pool=cinder-ceph",
        "allow class-read object_prefix rbd_children",
    ]
    cmd = "sudo ceph auth get client.cinder-ceph"
    auth_output = zaza_model.run_on_unit(
        'ceph-mon/0', cmd).get('Stdout').strip()
    # Raise on the first expected permission missing from the caps output.
    missing = [perm for perm in expected_permissions
               if perm not in auth_output]
    if missing:
        msg = ('cinder-ceph pool restriction ({}) was not'
               ' configured correctly.'
               ' Found: {}'.format(missing[0], auth_output))
        raise zaza_exceptions.CephPoolNotConfigured(msg)
class CephPrometheusTest(unittest.TestCase):
    """Test the Ceph <-> Prometheus relation."""

    def test_prometheus_metrics(self):
        """Validate that Prometheus has Ceph metrics."""
        # Skip gracefully when prometheus2 is not part of the deployment.
        try:
            zaza_model.get_application('prometheus2')
        except KeyError:
            raise unittest.SkipTest('Prometheus not present, skipping test')
        lead_unit_name = zaza_model.get_lead_unit_name('prometheus2')
        lead_unit = zaza_model.get_unit_from_name(lead_unit_name)
        # A healthy deployment exposes metadata for exactly three monitors.
        mon_count = _get_mon_count_from_prometheus(lead_unit.public_address)
        self.assertEqual('3', mon_count)
class CephPoolConfig(Exception):
    """Custom Exception for bad Ceph pool config."""
    # NOTE: 'pass' removed — a class body consisting of a docstring is
    # already complete; the extra statement was redundant.
class CheckPoolTypes(unittest.TestCase):
    """Test the ceph pools created for clients are of the expected type."""

    def test_check_pool_types(self):
        """Check type of pools created for clients."""
        # (application name, expected pool name) pairs to validate.
        app_pools = [
            ('glance', 'glance'),
            ('nova-compute', 'nova'),
            ('cinder-ceph', 'cinder-ceph')]

        runtime_pool_details = zaza_ceph.get_ceph_pool_details()

        for app, pool_name in app_pools:
            # Applications may be absent from the model under test.
            try:
                app_config = zaza_model.get_application_config(app)
            except KeyError:
                logging.info(
                    'Skipping pool check of %s, application %s not present',
                    pool_name,
                    app)
                continue
            # Only check pools for applications related to ceph-mon.
            rel_id = zaza_model.get_relation_id(
                app,
                'ceph-mon',
                remote_interface_name='client')
            if not rel_id:
                logging.info(
                    'Skipping pool check of %s, ceph relation not present',
                    app)
                continue
            juju_pool_config = app_config.get('pool-type')
            if juju_pool_config:
                expected_pool_type = juju_pool_config['value']
            else:
                # If the pool-type option is absent assume the default of
                # replicated.
                expected_pool_type = zaza_ceph.REPLICATED_POOL_TYPE
            # Locate the runtime detail for this pool; its absence is a
            # configuration failure.
            matching = [pd for pd in runtime_pool_details
                        if pd['pool_name'] == pool_name]
            if not matching:
                raise CephPoolConfig(
                    "Failed to find config for {}".format(pool_name))
            pool_config = matching[0]
            logging.info('Checking {} is {}'.format(
                pool_name,
                expected_pool_type))
            # Map the expected pool type to the numeric pool code Ceph
            # reports; unknown types fall through to -1 (never matches).
            if expected_pool_type == zaza_ceph.REPLICATED_POOL_TYPE:
                expected_pool_code = zaza_ceph.REPLICATED_POOL_CODE
            elif expected_pool_type == zaza_ceph.ERASURE_POOL_TYPE:
                expected_pool_code = zaza_ceph.ERASURE_POOL_CODE
            else:
                expected_pool_code = -1
            self.assertEqual(
                pool_config['type'],
                expected_pool_code)
# NOTE: We might query before prometheus has fetch data
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1,
                                               min=5, max=10),
                reraise=True)
def _get_mon_count_from_prometheus(prometheus_ip):
    """Query Prometheus for the number of Ceph monitors it has metrics for.

    :param prometheus_ip: Address of the Prometheus unit to query.
    :type prometheus_ip: str
    :returns: Monitor count as reported by the instant-vector query.
    :rtype: str
    """
    url = ('http://{}:9090/api/v1/query?query='
           'count(ceph_mon_metadata)'.format(prometheus_ip))
    http_session = requests.session()
    payload = http_session.get(url).json()
    logging.debug("Prometheus response: {}".format(payload))
    # The count lives in the second element of the result's 'value' pair.
    return payload['data']['result'][0]['value'][1]
class BlueStoreCompressionCharmOperation(test_utils.BaseCharmTest):
    """Test charm handling of bluestore compression configuration options."""

    @classmethod
    def setUpClass(cls):
        """Perform class one time initialization."""
        super(BlueStoreCompressionCharmOperation, cls).setUpClass()
        # Prefer keystone to determine the OpenStack release; fall back to
        # ceph-mon for Ceph-only deployments without keystone.
        release_application = 'keystone'
        try:
            zaza_model.get_application(release_application)
        except KeyError:
            release_application = 'ceph-mon'
        cls.current_release = zaza_openstack.get_os_release(
            application=release_application)
        cls.bionic_rocky = zaza_openstack.get_os_release('bionic_rocky')

    def setUp(self):
        """Perform common per test initialization steps."""
        super(BlueStoreCompressionCharmOperation, self).setUp()

        # determine if the tests should be run or not
        logging.debug('os_release: {} >= {} = {}'
                      .format(self.current_release,
                              self.bionic_rocky,
                              self.current_release >= self.bionic_rocky))
        # BlueStore compression requires Ceph Mimic or newer, which maps to
        # the bionic-rocky OpenStack release in the charms.
        self.mimic_or_newer = self.current_release >= self.bionic_rocky

    def _assert_pools_properties(self, pools, pools_detail,
                                 expected_properties, log_func=logging.info):
        """Check properties on a set of pools.

        :param pools: List of pool names to check.
        :type pools: List[str]
        :param pools_detail: List of dictionaries with pool detail
        :type pools_detail: List[Dict[str,any]]
        :param expected_properties: Properties to check and their expected
                                    values.
        :type expected_properties: Dict[str,any]
        :returns: Nothing
        :raises: AssertionError
        """
        for pool in pools:
            for pd in pools_detail:
                if pd['pool_name'] == pool:
                    if 'options' in expected_properties:
                        for k, v in expected_properties['options'].items():
                            # NOTE: assertEqual — the assertEquals alias is
                            # deprecated and removed in Python 3.12.
                            self.assertEqual(pd['options'][k], v)
                            log_func("['options']['{}'] == {}".format(k, v))
                    for k, v in expected_properties.items():
                        if k == 'options':
                            continue
                        self.assertEqual(pd[k], v)
                        log_func("{} == {}".format(k, v))

    def test_configure_compression(self):
        """Enable compression and validate properties flush through to pool."""
        if not self.mimic_or_newer:
            logging.info('Skipping test, Mimic or newer required.')
            return
        if self.application_name == 'ceph-osd':
            # The ceph-osd charm itself does not request pools, neither does
            # the BlueStore Compression configuration options it have affect
            # pool properties.
            logging.info('test does not apply to ceph-osd charm.')
            return
        elif self.application_name == 'ceph-radosgw':
            # The Ceph RadosGW creates many light weight pools to keep track
            # of metadata, we only compress the pool containing actual data.
            app_pools = ['.rgw.buckets.data']
        else:
            # Retrieve which pools the charm under test has requested skipping
            # metadata pools as they are deliberately not compressed.
            app_pools = [
                pool
                for pool in zaza_ceph.get_pools_from_broker_req(
                    self.application_name, model_name=self.model_name)
                if 'metadata' not in pool
            ]

        ceph_pools_detail = zaza_ceph.get_ceph_pool_details(
            model_name=self.model_name)

        logging.debug('BEFORE: {}'.format(ceph_pools_detail))
        try:
            logging.info('Checking Ceph pool compression_mode prior to '
                         'change')
            self._assert_pools_properties(
                app_pools, ceph_pools_detail,
                {'options': {'compression_mode': 'none'}})
        except KeyError:
            # A pool without the option set at all is equivalent to 'none'.
            logging.info('property does not exist on pool, which is OK.')
        logging.info('Changing "bluestore-compression-mode" to "force" on {}'
                     .format(self.application_name))
        with self.config_change(
                {'bluestore-compression-mode': 'none'},
                {'bluestore-compression-mode': 'force'}):
            # Retrieve pool details from Ceph after changing configuration
            ceph_pools_detail = zaza_ceph.get_ceph_pool_details(
                model_name=self.model_name)
            logging.debug('CONFIG_CHANGE: {}'.format(ceph_pools_detail))
            # NOTE: log message fixed ("after to change" -> "after the
            # change").
            logging.info('Checking Ceph pool compression_mode after the '
                         'change')
            self._assert_pools_properties(
                app_pools, ceph_pools_detail,
                {'options': {'compression_mode': 'force'}})
        ceph_pools_detail = zaza_ceph.get_ceph_pool_details(
            model_name=self.model_name)
        logging.debug('AFTER: {}'.format(ceph_pools_detail))
        logging.debug(juju_utils.get_relation_from_unit(
            'ceph-mon', self.application_name, None,
            model_name=self.model_name))
        logging.info('Checking Ceph pool compression_mode after restoring '
                     'config to previous value')
        self._assert_pools_properties(
            app_pools, ceph_pools_detail,
            {'options': {'compression_mode': 'none'}})

    def test_invalid_compression_configuration(self):
        """Set invalid configuration and validate charm response."""
        if not self.mimic_or_newer:
            logging.info('Skipping test, Mimic or newer required.')
            return
        # Temporarily expect the application to go 'blocked' while the
        # deliberately invalid value is applied, then restore the original
        # expectation.
        stored_target_deploy_status = self.test_config.get(
            'target_deploy_status', {})
        new_target_deploy_status = stored_target_deploy_status.copy()
        new_target_deploy_status[self.application_name] = {
            'workload-status': 'blocked',
            'workload-status-message': 'Invalid configuration',
        }
        if 'target_deploy_status' in self.test_config:
            self.test_config['target_deploy_status'].update(
                new_target_deploy_status)
        else:
            self.test_config['target_deploy_status'] = new_target_deploy_status

        with self.config_change(
                {'bluestore-compression-mode': 'none'},
                {'bluestore-compression-mode': 'PEBCAK'}):
            logging.info('Charm went into blocked state as expected, restore '
                         'configuration')
            self.test_config[
                'target_deploy_status'] = stored_target_deploy_status

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test charm upgrade."""

View File

@@ -0,0 +1,82 @@
#!/usr/bin/env python3
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define class for Charm Upgrade."""
import logging
import unittest
import zaza.model
from zaza.openstack.utilities import (
cli as cli_utils,
upgrade_utils as upgrade_utils,
)
from zaza.openstack.charm_tests.nova.tests import LTSGuestCreateTest
class FullCloudCharmUpgradeTest(unittest.TestCase):
    """Class to encapsulate Charm Upgrade Tests."""

    @classmethod
    def setUpClass(cls):
        """Run setup for Charm Upgrades."""
        cli_utils.setup_logging()
        # Guest-launch test reused before and after the upgrade to prove
        # the cloud still functions.
        cls.lts = LTSGuestCreateTest()
        cls.lts.setUpClass()
        cls.target_charm_namespace = '~openstack-charmers-next'

    def get_upgrade_url(self, charm_url):
        """Return the charm_url to upgrade to.

        :param charm_url: Current charm url.
        :type charm_url: str
        :returns: Charm url of the latest revision of the same charm in the
                  target namespace.
        :rtype: str
        """
        charm_name = upgrade_utils.extract_charm_name_from_url(
            charm_url)
        next_charm_url = zaza.model.get_latest_charm_url(
            "cs:{}/{}".format(self.target_charm_namespace, charm_name))
        return next_charm_url

    def test_200_run_charm_upgrade(self):
        """Run charm upgrade."""
        # Smoke test before touching anything.
        self.lts.test_launch_small_instance()
        applications = zaza.model.get_status().applications
        groups = upgrade_utils.get_charm_upgrade_groups(
            extra_filters=[upgrade_utils._filter_etcd,
                           upgrade_utils._filter_easyrsa,
                           upgrade_utils._filter_memcached])
        for group_name, group in groups:
            logging.info("About to upgrade {} ({})".format(group_name, group))
            for application, app_details in applications.items():
                if application not in group:
                    continue
                target_url = self.get_upgrade_url(app_details['charm'])
                if target_url == app_details['charm']:
                    # NOTE: logging.warning — the 'warn' alias is deprecated.
                    logging.warning(
                        "Skipping upgrade of {}, already using {}".format(
                            application,
                            target_url))
                else:
                    logging.info("Upgrading {} to {}".format(
                        application,
                        target_url))
                    zaza.model.upgrade_charm(
                        application,
                        switch=target_url)
                    logging.info("Waiting for charm url to update")
                    zaza.model.block_until_charm_url(application, target_url)
            # Let the whole model settle before moving to the next group.
            zaza.model.block_until_all_units_idle()
        # Smoke test after upgrading everything.
        self.lts.test_launch_small_instance()

View File

@@ -23,6 +23,12 @@ import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.glance.setup as glance_setup
from tenacity import (
Retrying,
stop_after_attempt,
wait_exponential,
)
class CinderTests(test_utils.OpenStackBaseTest):
"""Encapsulate Cinder tests."""
@@ -32,7 +38,10 @@ class CinderTests(test_utils.OpenStackBaseTest):
@classmethod
def setUpClass(cls):
"""Run class setup for running tests."""
super(CinderTests, cls).setUpClass()
super(CinderTests, cls).setUpClass(application_name='cinder')
cls.application_name = 'cinder'
cls.lead_unit = zaza.model.get_lead_unit_name(
"cinder", model_name=cls.model_name)
cls.cinder_client = openstack_utils.get_cinder_session_client(
cls.keystone_session)
cls.nova_client = openstack_utils.get_nova_session_client(
@@ -42,18 +51,66 @@ class CinderTests(test_utils.OpenStackBaseTest):
def tearDown(cls):
"""Remove test resources."""
logging.info('Running teardown')
for snapshot in cls.cinder_client.volume_snapshots.list():
for attempt in Retrying(
stop=stop_after_attempt(8),
wait=wait_exponential(multiplier=1, min=2, max=60)):
with attempt:
volumes = list(cls.cinder_client.volumes.list())
snapped_volumes = [v for v in volumes
if v.name.endswith("-from-snap")]
if snapped_volumes:
logging.info("Removing volumes from snapshot")
cls._remove_volumes(snapped_volumes)
volumes = list(cls.cinder_client.volumes.list())
snapshots = list(cls.cinder_client.volume_snapshots.list())
if snapshots:
logging.info("tearDown - snapshots: {}".format(
", ".join(s.name for s in snapshots)))
cls._remove_snapshots(snapshots)
if volumes:
logging.info("tearDown - volumes: {}".format(
", ".join(v.name for v in volumes)))
cls._remove_volumes(volumes)
@classmethod
def _remove_snapshots(cls, snapshots):
"""Remove snapshots passed as param.
:param volumes: the snapshots to delete
:type volumes: List[snapshot objects]
"""
for snapshot in snapshots:
if snapshot.name.startswith(cls.RESOURCE_PREFIX):
openstack_utils.delete_resource(
cls.cinder_client.volume_snapshots,
snapshot.id,
msg="snapshot")
for volume in cls.cinder_client.volumes.list():
logging.info("removing snapshot: {}".format(snapshot.name))
try:
openstack_utils.delete_resource(
cls.cinder_client.volume_snapshots,
snapshot.id,
msg="snapshot")
except Exception as e:
logging.error("error removing snapshot: {}".format(str(e)))
raise
@classmethod
def _remove_volumes(cls, volumes):
"""Remove volumes passed as param.
:param volumes: the volumes to delete
:type volumes: List[volume objects]
"""
for volume in volumes:
if volume.name.startswith(cls.RESOURCE_PREFIX):
openstack_utils.delete_resource(
cls.cinder_client.volumes,
volume.id,
msg="volume")
logging.info("removing volume: {}".format(volume.name))
try:
openstack_utils.delete_resource(
cls.cinder_client.volumes,
volume.id,
msg="volume")
except Exception as e:
logging.error("error removing volume: {}".format(str(e)))
raise
def test_100_volume_create_extend_delete(self):
"""Test creating, extending a volume."""
@@ -63,6 +120,8 @@ class CinderTests(test_utils.OpenStackBaseTest):
openstack_utils.resource_reaches_status(
self.cinder_client.volumes,
vol_new.id,
wait_iteration_max_time=1200,
stop_after_attempt=20,
expected_status="available",
msg="Volume status wait")
self.cinder_client.volumes.extend(
@@ -71,20 +130,30 @@ class CinderTests(test_utils.OpenStackBaseTest):
openstack_utils.resource_reaches_status(
self.cinder_client.volumes,
vol_new.id,
wait_iteration_max_time=1200,
stop_after_attempt=20,
expected_status="available",
msg="Volume status wait")
def test_105_volume_create_from_img(self):
"""Test creating a volume from an image."""
logging.debug("finding image {} ..."
.format(glance_setup.LTS_IMAGE_NAME))
image = self.nova_client.glance.find_image(
glance_setup.LTS_IMAGE_NAME)
logging.debug("using cinder_client to create volume from image {}"
.format(image.id))
vol_img = self.cinder_client.volumes.create(
name='{}-105-vol-from-img'.format(self.RESOURCE_PREFIX),
size=3,
imageRef=image.id)
logging.debug("now waiting for volume {} to reach available"
.format(vol_img.id))
openstack_utils.resource_reaches_status(
self.cinder_client.volumes,
vol_img.id,
wait_iteration_max_time=1200,
stop_after_attempt=20,
expected_status="available",
msg="Volume status wait")
@@ -97,6 +166,8 @@ class CinderTests(test_utils.OpenStackBaseTest):
openstack_utils.resource_reaches_status(
self.cinder_client.volumes,
vol_new.id,
wait_iteration_max_time=1200,
stop_after_attempt=20,
expected_status="available",
msg="Volume status wait")
@@ -107,6 +178,8 @@ class CinderTests(test_utils.OpenStackBaseTest):
openstack_utils.resource_reaches_status(
self.cinder_client.volume_snapshots,
snap_new.id,
wait_iteration_max_time=1200,
stop_after_attempt=20,
expected_status="available",
msg="Volume status wait")
@@ -118,6 +191,8 @@ class CinderTests(test_utils.OpenStackBaseTest):
openstack_utils.resource_reaches_status(
self.cinder_client.volumes,
vol_from_snap.id,
wait_iteration_max_time=1200,
stop_after_attempt=20,
expected_status="available",
msg="Volume status wait")
@@ -129,6 +204,8 @@ class CinderTests(test_utils.OpenStackBaseTest):
openstack_utils.resource_reaches_status(
self.cinder_client.volumes,
vol_new.id,
wait_iteration_max_time=1200,
stop_after_attempt=20,
expected_status="available",
msg="Volume status wait")
vol_new.force_delete()
@@ -139,36 +216,38 @@ class CinderTests(test_utils.OpenStackBaseTest):
@property
def services(self):
"""Return a list services for OpenStack release."""
services = ['cinder-scheduler', 'cinder-volume']
if (openstack_utils.get_os_release() >=
openstack_utils.get_os_release('xenial_ocata')):
services.append('apache2')
"""Return a list services for the selected OpenStack release."""
current_value = zaza.model.get_application_config(
self.application_name)['enabled-services']['value']
if current_value == "all":
services = ['cinder-scheduler', 'cinder-volume', 'cinder-api']
else:
services.append('cinder-api')
services = ['cinder-{}'.format(svc)
for svc in ('api', 'scheduler', 'volume')
if svc in current_value]
if ('cinder-api' in services and
(openstack_utils.get_os_release() >=
openstack_utils.get_os_release('xenial_ocata'))):
services.remove('cinder-api')
services.append('apache2')
return services
def test_900_restart_on_config_change(self):
"""Checking restart happens on config change.
Change disk format and assert then change propagates to the correct
Change debug mode and assert that change propagates to the correct
file and that services are restarted as a result
"""
# Expected default and alternate values
set_default = {'debug': 'False'}
set_alternate = {'debug': 'True'}
# Config file affected by juju set config change
conf_file = '/etc/cinder/cinder.conf'
# Make config change, check for service restarts
logging.debug('Setting disk format glance...')
self.restart_on_changed(
logging.debug('Setting debug mode...')
self.restart_on_changed_debug_oslo_config_file(
conf_file,
set_default,
set_alternate,
{'DEFAULT': {'debug': ['False']}},
{'DEFAULT': {'debug': ['True']}},
self.services)
def test_901_pause_resume(self):
@@ -177,13 +256,7 @@ class CinderTests(test_utils.OpenStackBaseTest):
Pause service and check services are stopped then resume and check
they are started
"""
services = ['cinder-scheduler', 'cinder-volume']
if (openstack_utils.get_os_release() >=
openstack_utils.get_os_release('xenial_ocata')):
services.append('apache2')
else:
services.append('cinder-api')
with self.pause_resume(services):
with self.pause_resume(self.services):
logging.info("Testing pause resume")

View File

@@ -0,0 +1,17 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing cinder-backup."""

View File

@@ -0,0 +1,221 @@
#!/usr/bin/env python3
#
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate cinder-backup testing."""
import copy
import logging
import tenacity
import zaza.model
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.ceph as ceph_utils
import zaza.openstack.utilities.openstack as openstack_utils
class CinderBackupTest(test_utils.OpenStackBaseTest):
"""Encapsulate Cinder Backup tests."""
RESOURCE_PREFIX = 'zaza-cinderbackuptests'
@classmethod
def setUpClass(cls):
"""Run class setup for running Cinder Backup tests."""
super(CinderBackupTest, cls).setUpClass()
cls.cinder_client = openstack_utils.get_cinder_session_client(
cls.keystone_session)
@property
def services(self):
"""Return a list services for the selected OpenStack release."""
current_release = openstack_utils.get_os_release()
services = ['cinder-scheduler', 'cinder-volume']
if (current_release >=
openstack_utils.get_os_release('xenial_ocata')):
services.append('apache2')
else:
services.append('cinder-api')
return services
    def test_100_volume_create_extend_delete(self):
        """Test creating then extending a cinder volume.

        Creates a volume of size 1, extends it to size 2 and waits for
        the volume to return to the 'available' state.
        """
        vol_new = openstack_utils.create_volume(
            self.cinder_client,
            name='{}-100-vol'.format(self.RESOURCE_PREFIX),
            size=1)
        # Extend takes the new size; the volume transitions through an
        # intermediate state before settling back to 'available'.
        self.cinder_client.volumes.extend(
            vol_new.id,
            '2')
        openstack_utils.resource_reaches_status(
            self.cinder_client.volumes,
            vol_new.id,
            expected_status="available",
            msg="Extended volume")
def test_410_cinder_vol_create_backup_delete_restore_pool_inspect(self):
"""Create, backup, delete, restore a ceph-backed cinder volume.
Create, backup, delete, restore a ceph-backed cinder volume, and
inspect ceph cinder pool object count as the volume is created
and deleted.
"""
unit_name = zaza.model.get_lead_unit_name('ceph-mon')
obj_count_samples = []
pool_size_samples = []
pools = ceph_utils.get_ceph_pools(unit_name)
expected_pool = 'cinder-ceph'
cinder_ceph_pool = pools[expected_pool]
# Check ceph cinder pool object count, disk space usage and pool name
logging.info('Checking ceph cinder pool original samples...')
pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
unit_name, cinder_ceph_pool)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
self.assertEqual(pool_name, expected_pool)
for attempt in tenacity.Retrying(
stop=tenacity.stop_after_attempt(3)):
with attempt:
# Create ceph-backed cinder volume
cinder_vol_name = '{}-410-{}-vol'.format(
self.RESOURCE_PREFIX, attempt.retry_state.attempt_number)
cinder_vol = self.cinder_client.volumes.create(
name=cinder_vol_name, size=1)
openstack_utils.resource_reaches_status(
self.cinder_client.volumes,
cinder_vol.id,
wait_iteration_max_time=180,
stop_after_attempt=15,
expected_status='available',
msg='ceph-backed cinder volume')
# Back up the volume
# NOTE(lourot): sometimes, especially on Mitaka, the backup
# remains stuck forever in 'creating' state and the volume in
# 'backing-up' state. See lp:1877076
# Attempting to create another volume and another backup
# usually then succeeds. Release notes and bug trackers show
# that many things have been fixed and are still left to be
# fixed in this area.
# When the backup creation succeeds, it usually does within
# 12 minutes.
vol_backup_name = '{}-410-{}-backup-vol'.format(
self.RESOURCE_PREFIX, attempt.retry_state.attempt_number)
vol_backup = self.cinder_client.backups.create(
cinder_vol.id, name=vol_backup_name)
openstack_utils.resource_reaches_status(
self.cinder_client.backups,
vol_backup.id,
wait_iteration_max_time=180,
stop_after_attempt=15,
expected_status='available',
msg='Backup volume')
# Delete the volume
openstack_utils.delete_volume(self.cinder_client, cinder_vol.id)
# Restore the volume
self.cinder_client.restores.restore(vol_backup.id)
openstack_utils.resource_reaches_status(
self.cinder_client.backups,
vol_backup.id,
wait_iteration_max_time=180,
stop_after_attempt=15,
expected_status='available',
msg='Restored backup volume')
# Delete the backup
openstack_utils.delete_volume_backup(
self.cinder_client,
vol_backup.id)
openstack_utils.resource_removed(
self.cinder_client.backups,
vol_backup.id,
wait_iteration_max_time=180,
stop_after_attempt=15,
msg="Backup volume")
# Re-check ceph cinder pool object count and disk usage
logging.info('Checking ceph cinder pool samples '
'after volume create...')
pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
unit_name, cinder_ceph_pool, self.model_name)
obj_count_samples.append(obj_count)
pool_size_samples.append(kb_used)
vols = self.cinder_client.volumes.list()
try:
cinder_vols = [v for v in vols if v.name == cinder_vol_name]
except AttributeError:
cinder_vols = [v for v in vols if
v.display_name == cinder_vol_name]
if not cinder_vols:
# NOTE(hopem): it appears that at some point cinder-backup stopped
# restoring volume metadata properly so revert to default name if
# original is not found
name = "restore_backup_{}".format(vol_backup.id)
try:
cinder_vols = [v for v in vols if v.name == name]
except AttributeError:
cinder_vols = [v for v in vols if v.display_name == name]
self.assertTrue(cinder_vols)
cinder_vol = cinder_vols[0]
# Delete restored cinder volume
openstack_utils.delete_volume(self.cinder_client, cinder_vol.id)
openstack_utils.resource_removed(
self.cinder_client.volumes,
cinder_vol.id,
wait_iteration_max_time=180,
stop_after_attempt=15,
msg="Volume")
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=10, max=300),
reraise=True, stop=tenacity.stop_after_attempt(10),
retry=tenacity.retry_if_exception_type(AssertionError))
def _check_get_ceph_pool_sample(obj_count_samples, pool_size_samples):
pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample(
unit_name, cinder_ceph_pool, self.model_name)
_obj_count_samples = copy.deepcopy(obj_count_samples)
_pool_size_samples = copy.deepcopy(pool_size_samples)
_obj_count_samples.append(obj_count)
_pool_size_samples.append(kb_used)
# Validate ceph cinder pool object count samples over time
original, created, deleted = range(3)
self.assertFalse(_obj_count_samples[created] <=
_obj_count_samples[original])
self.assertFalse(_obj_count_samples[deleted] >=
_obj_count_samples[created])
# Luminous (pike) ceph seems more efficient at disk usage so we
# cannot guarantee the ordering of kb_used
if (openstack_utils.get_os_release() <
openstack_utils.get_os_release('xenial_mitaka')):
self.assertFalse(_pool_size_samples[created] <=
_pool_size_samples[original])
self.assertFalse(_pool_size_samples[deleted] >=
_pool_size_samples[created])
# Final check, ceph cinder pool object count and disk usage
logging.info('Checking ceph cinder pool after volume delete...')
# It sometime takes a short time for removal to be reflected in
# get_ceph_pool_sample so wrap check in tenacity decorator to retry.
_check_get_ceph_pool_sample(obj_count_samples, pool_size_samples)

View File

@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing cinder-backup-swift."""
"""Collection of code for setting up and testing cinder-backup-swift-proxy."""

View File

@@ -12,14 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for configuring cinder-backup-swift."""
"""Code for configuring cinder-backup-swift-proxy."""
import zaza.model as zaza_model
import zaza.openstack.charm_tests.test_utils
def configure_cinder_backup():
"""Configure cinder-backup-swift."""
"""Configure cinder-backup-swift-proxy."""
keystone_ip = zaza_model.get_app_ips(
'swift-keystone')[0]
swift_ip = zaza_model.get_app_ips(
@@ -32,17 +32,18 @@ def configure_cinder_backup():
else:
auth_url = 'http://{}:5000/v3'.format(keystone_ip)
endpoint_url = 'http://{}:8080/v1/AUTH'.format(swift_ip)
cinder_backup_swift_conf = {
cinder_backup_swift_proxy_conf = {
'endpoint-url': endpoint_url,
'auth-url': auth_url
}
juju_service = 'cinder-backup-swift'
zaza_model.set_application_config(juju_service, cinder_backup_swift_conf)
juju_service = 'cinder-backup-swift-proxy'
zaza_model.set_application_config(juju_service,
cinder_backup_swift_proxy_conf)
zaza_model.wait_for_agent_status()
zaza_model.wait_for_application_states()
_singleton = zaza.openstack.charm_tests.test_utils.OpenStackBaseTest()
_singleton.setUpClass()
with _singleton.config_change(cinder_backup_swift_conf,
cinder_backup_swift_conf):
with _singleton.config_change(cinder_backup_swift_proxy_conf,
cinder_backup_swift_proxy_conf):
# wait for configuration to be applied then return
pass

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing designate."""

View File

@@ -0,0 +1,328 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate designate testing."""
import logging
import unittest
import tenacity
import subprocess
import designateclient.v1.domains as domains
import designateclient.v1.records as records
import designateclient.v1.servers as servers
import zaza.model
import zaza.utilities.juju as juju_utils
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.designate.utils as designate_utils
import zaza.charm_lifecycle.utils as lifecycle_utils
class BaseDesignateTest(test_utils.OpenStackBaseTest):
"""Base for Designate charm tests."""
@classmethod
def setUpClass(cls, application_name=None, model_alias=None):
"""Run class setup for running Designate charm operation tests."""
application_name = application_name or "designate"
model_alias = model_alias or ""
super(BaseDesignateTest, cls).setUpClass(application_name, model_alias)
os_release = openstack_utils.get_os_release
if os_release() >= os_release('bionic_rocky'):
cls.designate_svcs = [
'designate-agent', 'designate-api', 'designate-central',
'designate-mdns', 'designate-worker', 'designate-sink',
'designate-producer',
]
else:
cls.designate_svcs = [
'designate-agent', 'designate-api', 'designate-central',
'designate-mdns', 'designate-pool-manager', 'designate-sink',
'designate-zone-manager',
]
# Get keystone session
cls.post_xenial_queens = os_release() >= os_release('xenial_queens')
overcloud_auth = openstack_utils.get_overcloud_auth()
keystone = openstack_utils.get_keystone_client(overcloud_auth)
keystone_session = openstack_utils.get_overcloud_keystone_session()
if cls.post_xenial_queens:
cls.designate = openstack_utils.get_designate_session_client(
session=keystone_session
)
cls.domain_list = cls.designate.zones.list
cls.domain_delete = cls.designate.zones.delete
cls.domain_create = cls.designate.zones.create
else:
# Authenticate admin with designate endpoint
designate_ep = keystone.service_catalog.url_for(
service_type='dns',
interface='publicURL')
keystone_ep = keystone.service_catalog.url_for(
service_type='identity',
interface='publicURL')
cls.designate = openstack_utils.get_designate_session_client(
version=1,
auth_url=keystone_ep,
token=keystone_session.get_token(),
tenant_name="admin",
endpoint=designate_ep)
cls.domain_list = cls.designate.domains.list
cls.domain_delete = cls.designate.domains.delete
cls.domain_create = cls.designate.domains.create
cls.server_list = cls.designate.servers.list
cls.server_create = cls.designate.servers.create
cls.server_delete = cls.designate.servers.delete
class DesignateAPITests(BaseDesignateTest):
"""Tests interact with designate api."""
TEST_DOMAIN = 'amuletexample.com.'
TEST_NS1_RECORD = 'ns1.{}'.format(TEST_DOMAIN)
TEST_NS2_RECORD = 'ns2.{}'.format(TEST_DOMAIN)
TEST_WWW_RECORD = "www.{}".format(TEST_DOMAIN)
TEST_RECORD = {TEST_WWW_RECORD: '10.0.0.23'}
def _get_server_id(self, server_name=None, server_id=None):
for srv in self.server_list():
if isinstance(srv, dict):
if srv['id'] == server_id or srv['name'] == server_name:
return srv['id']
elif srv.name == server_name or srv.id == server_id:
return srv.id
return None
def _wait_on_server_gone(self, server_id):
@tenacity.retry(
wait=tenacity.wait_exponential(multiplier=1, min=5, max=10),
reraise=True
)
def wait():
logging.debug('Waiting for server %s to disappear', server_id)
if self._get_server_id(server_id=server_id):
raise Exception("Server Exists")
self.server_delete(server_id)
return wait()
def test_400_server_creation(self):
"""Simple api calls to create a server."""
# Designate does not allow the last server to be deleted so ensure
# that ns1 is always present
if self.post_xenial_queens:
logging.info('Skipping server creation tests for Queens and above')
return
if not self._get_server_id(server_name=self.TEST_NS1_RECORD):
server = servers.Server(name=self.TEST_NS1_RECORD)
new_server = self.server_create(server)
self.assertIsNotNone(new_server)
logging.debug('Checking if server exists before trying to create it')
old_server_id = self._get_server_id(server_name=self.TEST_NS2_RECORD)
if old_server_id:
logging.debug('Deleting old server')
self._wait_on_server_gone(old_server_id)
logging.debug('Creating new server')
server = servers.Server(name=self.TEST_NS2_RECORD)
new_server = self.server_create(server)
self.assertIsNotNone(new_server, "Failed to Create Server")
self._wait_on_server_gone(self._get_server_id(self.TEST_NS2_RECORD))
def _get_domain_id(self, domain_name=None, domain_id=None):
for dom in self.domain_list():
if isinstance(dom, dict):
if dom['id'] == domain_id or dom['name'] == domain_name:
return dom['id']
elif dom.id == domain_id or dom.name == domain_name:
return dom.id
return None
def _wait_on_domain_gone(self, domain_id):
@tenacity.retry(
wait=tenacity.wait_exponential(multiplier=1, min=5, max=10),
reraise=True
)
def wait():
logging.debug('Waiting for domain %s to disappear', domain_id)
if self._get_domain_id(domain_id=domain_id):
raise Exception("Domain Exists")
self.domain_delete(domain_id)
wait()
@tenacity.retry(
wait=tenacity.wait_exponential(multiplier=1, min=5, max=10),
reraise=True
)
def _wait_to_resolve_test_record(self):
dns_ip = juju_utils.get_relation_from_unit(
'designate/0',
'designate-bind/0',
'dns-backend'
).get('private-address')
logging.info('Waiting for dns record to propagate @ {}'.format(dns_ip))
lookup_cmd = [
'dig', '+short', '@{}'.format(dns_ip),
self.TEST_WWW_RECORD]
cmd_out = subprocess.check_output(
lookup_cmd, universal_newlines=True).rstrip()
if not self.TEST_RECORD[self.TEST_WWW_RECORD] == cmd_out:
raise Exception("Record Doesn't Exist")
def test_400_domain_creation(self):
"""Simple api calls to create domain."""
logging.debug('Checking if domain exists before trying to create it')
old_dom_id = self._get_domain_id(domain_name=self.TEST_DOMAIN)
if old_dom_id:
logging.debug('Deleting old domain')
self._wait_on_domain_gone(old_dom_id)
logging.debug('Creating new domain')
domain = domains.Domain(
name=self.TEST_DOMAIN,
email="fred@amuletexample.com")
if self.post_xenial_queens:
new_domain = self.domain_create(
name=domain.name, email=domain.email)
else:
new_domain = self.domain_create(domain)
self.assertIsNotNone(new_domain)
logging.debug('Creating new test record')
_record = records.Record(
name=self.TEST_WWW_RECORD,
type="A",
data=self.TEST_RECORD[self.TEST_WWW_RECORD])
if self.post_xenial_queens:
domain_id = new_domain['id']
self.designate.recordsets.create(
domain_id, _record.name, _record.type, [_record.data])
else:
domain_id = new_domain.id
self.designate.records.create(domain_id, _record)
self._wait_to_resolve_test_record()
logging.debug('Tidy up delete test record')
self._wait_on_domain_gone(domain_id)
logging.debug('OK')
class DesignateCharmTests(BaseDesignateTest):
"""Designate charm restart and pause tests."""
def test_900_restart_on_config_change(self):
"""Checking restart happens on config change.
Change debug mode and assert that change propagates to the correct
file and that services are restarted as a result
"""
# Services which are expected to restart upon config change,
# and corresponding config files affected by the change
conf_file = '/etc/designate/designate.conf'
# Make config change, check for service restarts
self.restart_on_changed_debug_oslo_config_file(
conf_file,
self.designate_svcs,
)
def test_910_pause_and_resume(self):
"""Run pause and resume tests.
Pause service and check services are stopped then resume and check
they are started
"""
with self.pause_resume(
self.designate_svcs,
pgrep_full=False):
logging.info("Testing pause resume")
class DesignateTests(DesignateAPITests, DesignateCharmTests):
    """Collection of all Designate test classes.

    Combines DesignateAPITests and DesignateCharmTests via multiple
    inheritance so a single class runs both suites.
    """

    pass
class DesignateBindExpand(BaseDesignateTest):
"""Test expanding and shrinking bind."""
TEST_DOMAIN = 'zazabindtesting.com.'
TEST_NS1_RECORD = 'ns1.{}'.format(TEST_DOMAIN)
TEST_NS2_RECORD = 'ns2.{}'.format(TEST_DOMAIN)
TEST_WWW_RECORD = "www.{}".format(TEST_DOMAIN)
TEST_RECORD = {TEST_WWW_RECORD: '10.0.0.24'}
def test_expand_and_contract(self):
"""Test expanding and shrinking bind."""
test_config = lifecycle_utils.get_charm_config(fatal=False)
states = test_config.get("target_deploy_status", {})
if not self.post_xenial_queens:
raise unittest.SkipTest("Test not supported before Queens")
domain = designate_utils.create_or_return_zone(
self.designate,
name=self.TEST_DOMAIN,
email="test@zaza.com")
designate_utils.create_or_return_recordset(
self.designate,
domain['id'],
'www',
'A',
[self.TEST_RECORD[self.TEST_WWW_RECORD]])
# Test record is in bind and designate
designate_utils.check_dns_entry(
self.designate,
self.TEST_RECORD[self.TEST_WWW_RECORD],
self.TEST_DOMAIN,
record_name=self.TEST_WWW_RECORD)
logging.info('Adding a designate-bind unit')
zaza.model.add_unit('designate-bind', wait_appear=True)
zaza.model.block_until_all_units_idle()
zaza.model.wait_for_application_states(states=states)
logging.info('Performing DNS lookup on all units')
designate_utils.check_dns_entry(
self.designate,
self.TEST_RECORD[self.TEST_WWW_RECORD],
self.TEST_DOMAIN,
record_name=self.TEST_WWW_RECORD)
units = zaza.model.get_status().applications['designate-bind']['units']
doomed_unit = sorted(units.keys())[0]
logging.info('Removing {}'.format(doomed_unit))
zaza.model.destroy_unit(
'designate-bind',
doomed_unit,
wait_disappear=True)
zaza.model.block_until_all_units_idle()
zaza.model.wait_for_application_states(states=states)
logging.info('Performing DNS lookup on all units')
designate_utils.check_dns_entry(
self.designate,
self.TEST_RECORD[self.TEST_WWW_RECORD],
self.TEST_DOMAIN,
record_name=self.TEST_WWW_RECORD)

View File

@@ -0,0 +1,205 @@
"""Utilities for interacting with designate."""
import dns.resolver
import logging
import tenacity
import designateclient.exceptions
import zaza.model
def create_or_return_zone(client, name, email):
    """Create zone or return matching existing zone.

    If a zone with the requested name already exists (designate signals
    this with a Conflict), look up the existing zone and return it
    instead of failing.

    :param client: Client to query designate
    :type client: designateclient.v2.Client
    :param name: Name of zone
    :type name: str
    :param email: Email address to associate with zone.
    :type email: str
    :returns: Zone
    :rtype: designateclient.v2.zones.Zone
    :raises: AssertionError if the conflicting name does not match
             exactly one existing zone
    """
    try:
        zone = client.zones.create(
            name=name,
            email=email)
    except designateclient.exceptions.Conflict:
        # Zone already exists; find and return the existing one.
        logging.info('{} zone already exists.'.format(name))
        zones = [z for z in client.zones.list() if z['name'] == name]
        assert len(zones) == 1, "Wrong number of zones found {}".format(zones)
        zone = zones[0]
    return zone
def create_or_return_recordset(client, zone_id, sub_domain, record_type, data):
    """Create recordset or return matching existing recordset.

    If the recordset already exists (designate signals this with a
    Conflict), look it up by sub-domain and return the existing one
    instead of failing.

    :param client: Client to query designate
    :type client: designateclient.v2.Client
    :param zone_id: uuid of zone
    :type zone_id: str
    :param sub_domain: Subdomain to associate records with
    :type sub_domain: str
    :param record_type: Record type, eg 'A'
    :type record_type: str
    :param data: List of record values eg ['10.0.0.24']
    :type data: list
    :returns: RecordSet
    :rtype: designateclient.v2.recordsets.RecordSet
    :raises: AssertionError if a conflicting recordset cannot be found
             in the zone
    """
    try:
        rs = client.recordsets.create(
            zone_id,
            sub_domain,
            record_type,
            data)
    except designateclient.exceptions.Conflict:
        logging.info('{} record already exists.'.format(data))
        # Find the existing recordset. Previously a missing match fell
        # through to an obscure UnboundLocalError on return; fail with a
        # clear assertion instead. Last match wins, as before.
        rs = None
        for r in client.recordsets.list(zone_id):
            if r['name'].split('.')[0] == sub_domain:
                rs = r
        assert rs is not None, (
            "Recordset for {} not found in zone {}".format(
                sub_domain, zone_id))
    return rs
def get_designate_zone_objects(designate_client, domain_name=None,
                               domain_id=None):
    """Get all domains matching a given domain_name or domain_id.

    :param designate_client: Client to query designate
    :type designate_client: designateclient.v2.Client
    :param domain_name: Name of domain to lookup
    :type domain_name: str
    :param domain_id: UUID of domain to lookup
    :type domain_id: str
    :returns: List of Domain objects matching domain_name or domain_id
    :rtype: [designateclient.v2.domains.Domain,]
    """
    matches = []
    for zone in designate_client.zones.list():
        if zone['name'] == domain_name or zone['id'] == domain_id:
            matches.append(zone)
    return matches
def get_designate_domain_object(designate_client, domain_name):
    """Get the one and only domain matching the given domain_name.

    :param designate_client: Client to query designate
    :type designate_client: designateclient.v2.Client
    :param domain_name: Name of domain to lookup
    :type domain_name: str
    :returns: Domain with name domain_name
    :rtype: designateclient.v2.domains.Domain
    :raises: AssertionError
    """
    matching_zones = get_designate_zone_objects(designate_client,
                                                domain_name=domain_name)
    # Exactly one zone must match, otherwise the lookup is ambiguous.
    assert len(matching_zones) == 1, "Found {} domains for {}".format(
        len(matching_zones), domain_name)
    return matching_zones[0]
def get_designate_dns_records(designate_client, domain_name, ip):
    """Look for records in designate that match the given ip.

    :param designate_client: Client to query designate
    :type designate_client: designateclient.v2.Client
    :param domain_name: Name of domain to lookup
    :type domain_name: str
    :param ip: Record data to match; compared against the recordset's
        whole 'records' field, so callers pass a list, eg ['10.0.0.24']
    :type ip: list
    :returns: List of recordsets whose records match ip exactly
    :rtype: list
    :raises: AssertionError if domain_name does not match exactly one zone
    """
    dns_zone = get_designate_domain_object(designate_client, domain_name)
    # NOTE: equality against the full 'records' list, not membership, so
    # 'ip' must be the complete expected record list.
    return [r for r in designate_client.recordsets.list(dns_zone['id'])
            if r['records'] == ip]
def check_dns_record_exists(dns_server_ip, query_name, expected_ip,
                            retry_count=3):
    """Lookup a DNS record against the given dns server address.

    The lookup itself is retried with exponential backoff (records may
    still be propagating); the returned addresses are then asserted to
    match ``expected_ip``.

    :param dns_server_ip: IP address to run query against
    :type dns_server_ip: str
    :param query_name: Record to lookup
    :type query_name: str
    :param expected_ip: IP address expected to be associated with record.
    :type expected_ip: str
    :param retry_count: Number of times to retry query. Useful if waiting
                        for record to propagate.
    :type retry_count: int
    :raises: AssertionError
    """
    my_resolver = dns.resolver.Resolver()
    # Query only the nameserver under test, not the system resolvers.
    my_resolver.nameservers = [dns_server_ip]
    for attempt in tenacity.Retrying(
            stop=tenacity.stop_after_attempt(retry_count),
            wait=tenacity.wait_exponential(multiplier=1, min=2, max=10),
            reraise=True):
        with attempt:
            logging.info("Checking record {} against {}".format(
                query_name,
                dns_server_ip))
            answers = my_resolver.query(query_name)
    # 'answers' deliberately leaks out of the retry loop above; it holds
    # the last successful query result.
    for rdata in answers:
        logging.info("Checking address returned by {} is correct".format(
            dns_server_ip))
        assert str(rdata) == expected_ip
def check_dns_entry(des_client, ip, domain, record_name):
    """Check that record for ip is in designate and in bind.

    :param des_client: Client to query designate
    :type des_client: designateclient.v2.Client
    :param ip: IP address to lookup
    :type ip: str
    :param domain: Domain to look for record in
    :type domain: str
    :param record_name: record name
    :type record_name: str
    :raises: AssertionError
    """
    # Verify both the designate API view and the actual bind servers.
    check_dns_entry_in_designate(des_client, [ip], domain,
                                 record_name=record_name)
    check_dns_entry_in_bind(ip, record_name)
def check_dns_entry_in_designate(des_client, ip, domain, record_name=None):
    """Look for records in designate that match the given ip domain.

    :param des_client: Client to query designate
    :type des_client: designateclient.v2.Client
    :param ip: Record data to lookup in designate; a full record list,
        eg ['10.0.0.24']
    :type ip: list
    :param domain: Name of domain to lookup
    :type domain: str
    :param record_name: Retrieved record should have this name
    :type record_name: str
    :raises: AssertionError
    """
    records = get_designate_dns_records(des_client, domain, ip)
    assert records, "Record not found for {} in designate".format(ip)
    logging.info('Found record in {} for {} in designate'.format(domain, ip))
    if record_name:
        # Optionally also require one of the matches to carry the
        # expected record name.
        recs = [r for r in records if r['name'] == record_name]
        assert recs, "No DNS entry name matches expected name {}".format(
            record_name)
        logging.info('Found record in {} for {} in designate'.format(
            domain,
            record_name))
def check_dns_entry_in_bind(ip, record_name, model_name=None):
    """Check that record for ip address is in bind.

    Every designate-bind unit is queried individually so that a stale
    record on any single backend is caught.

    :param ip: IP address to lookup
    :type ip: str
    :param record_name: record name
    :type record_name: str
    :param model_name: Name of model to query; default model when None
    :type model_name: str
    :raises: AssertionError
    """
    for addr in zaza.model.get_app_ips('designate-bind',
                                       model_name=model_name):
        logging.info("Checking {} is {} against ({})".format(
            record_name,
            ip,
            addr))
        # Allow a generous retry count as the record may still be
        # propagating to this backend.
        check_dns_record_exists(addr, record_name, ip, retry_count=6)

View File

@@ -16,6 +16,8 @@
"""Setup for BGP deployments."""
import logging
import zaza.model
from zaza.openstack.configure import (
network,
bgp_speaker,
@@ -86,6 +88,15 @@ def setup():
# Configure the overcloud network
network.setup_sdn(network_config, keystone_session=keystone_session)
# LP Bugs #1784083 and #1841459, require a late restart of the
# neutron-bgp-dragent service
logging.warning("Due to LP Bugs #1784083 and #1841459, we require a late "
"restart of the neutron-bgp-dragent service before "
"setting up BGP.")
for unit in zaza.model.get_units("neutron-dynamic-routing"):
generic_utils.systemctl(unit, "neutron-bgp-dragent", command="restart")
# Configure BGP
bgp_speaker.setup_bgp_speaker(
peer_application_name=DEFAULT_PEER_APPLICATION_NAME,

View File

@@ -61,16 +61,17 @@ def test_bgp_routes(peer_application_name="quagga", keystone_session=None):
# This test may run immediately after configuration. It may take time for
# routes to propogate via BGP. Do a binary backoff.
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
reraise=True, stop=tenacity.stop_after_attempt(8))
reraise=True, stop=tenacity.stop_after_attempt(10))
def _assert_cidr_in_peer_routing_table(peer_unit, cidr):
logging.debug("Checking for {} on BGP peer {}"
.format(cidr, peer_unit))
# Run show ip route bgp on BGP peer
routes = juju_utils.remote_run(
peer_unit, remote_cmd='vtysh -c "show ip route bgp"')
logging.debug(routes)
logging.info(routes)
assert cidr in routes, (
"CIDR, {}, not found in BGP peer's routing table" .format(cidr))
"CIDR, {}, not found in BGP peer's routing table: {}"
.format(cidr, routes))
_assert_cidr_in_peer_routing_table(peer_unit, private_cidr)
logging.info("Private subnet CIDR, {}, found in routing table"

View File

@@ -14,10 +14,16 @@
"""Code for configuring glance."""
import json
import logging
import boto3
import zaza.model as model
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.utilities.deployment_env as deployment_env
CIRROS_IMAGE_NAME = "cirros"
CIRROS_ALT_IMAGE_NAME = "cirros_alt"
LTS_RELEASE = "bionic"
LTS_IMAGE_NAME = "bionic"
@@ -30,7 +36,37 @@ def basic_setup():
"""
def add_image(image_url, glance_client=None, image_name=None, tags=[]):
def _get_default_glance_client():
    """Create default Glance client using overcloud credentials.

    :returns: Authenticated glance client for the overcloud
    :rtype: glanceclient.Client
    """
    session = openstack_utils.get_overcloud_keystone_session()
    return openstack_utils.get_glance_session_client(session)
def get_stores_info(glance_client=None):
    """Retrieve glance backing store info.

    :param glance_client: Authenticated glanceclient
    :type glance_client: glanceclient.Client
    :returns: Store descriptors reported by the Glance stores API
    :rtype: list
    """
    client = glance_client if glance_client else _get_default_glance_client()
    info = client.images.get_stores_info()
    return info.get("stores", [])
def get_store_ids(glance_client=None):
    """Retrieve glance backing store ids.

    :param glance_client: Authenticated glanceclient
    :type glance_client: glanceclient.Client
    :returns: Ids of the configured backing stores
    :rtype: list of str
    """
    ids = []
    for store in get_stores_info(glance_client):
        ids.append(store["id"])
    return ids
def add_image(image_url, glance_client=None, image_name=None, tags=[],
properties=None, backend=None, disk_format='qcow2',
visibility='public', container_format='bare'):
"""Retrieve image from ``image_url`` and add it to glance.
:param image_url: Retrievable URL with image data
@@ -41,11 +77,17 @@ def add_image(image_url, glance_client=None, image_name=None, tags=[]):
:type image_name: str
:param tags: List of tags to add to image
:type tags: list of str
:param properties: Properties to add to image
:type properties: dict
"""
if not glance_client:
keystone_session = openstack_utils.get_overcloud_keystone_session()
glance_client = openstack_utils.get_glance_session_client(
keystone_session)
glance_client = glance_client or _get_default_glance_client()
if backend is not None:
stores = get_store_ids(glance_client)
if backend not in stores:
raise ValueError("Invalid backend: %(backend)s "
"(available: %(available)s)" % {
"backend": backend,
"available": ", ".join(stores)})
if image_name:
image = openstack_utils.get_images_by_name(
glance_client, image_name)
@@ -59,7 +101,12 @@ def add_image(image_url, glance_client=None, image_name=None, tags=[]):
glance_client,
image_url,
image_name,
tags=tags)
tags=tags,
properties=properties,
backend=backend,
disk_format=disk_format,
visibility=visibility,
container_format=container_format)
def add_cirros_image(glance_client=None, image_name=None):
@@ -77,7 +124,20 @@ def add_cirros_image(glance_client=None, image_name=None):
image_name=image_name)
def add_lts_image(glance_client=None, image_name=None, release=None):
def add_cirros_alt_image(glance_client=None, image_name=None):
    """Add alt cirros image to the current deployment.

    :param glance_client: Authenticated glanceclient
    :type glance_client: glanceclient.Client
    :param image_name: Label for the image in glance
    :type image_name: str
    """
    if not image_name:
        image_name = CIRROS_ALT_IMAGE_NAME
    add_cirros_image(glance_client, image_name)
def add_lts_image(glance_client=None, image_name=None, release=None,
properties=None):
"""Add an Ubuntu LTS image to the current deployment.
:param glance: Authenticated glanceclient
@@ -86,12 +146,77 @@ def add_lts_image(glance_client=None, image_name=None, release=None):
:type image_name: str
:param release: Name of ubuntu release.
:type release: str
:param properties: Custom image properties
:type properties: dict
"""
deploy_ctxt = deployment_env.get_deployment_context()
image_arch = deploy_ctxt.get('TEST_IMAGE_ARCH', 'amd64')
arch_image_properties = {
'arm64': {'hw_firmware_type': 'uefi'},
'ppc64el': {'architecture': 'ppc64'}}
properties = properties or arch_image_properties.get(image_arch)
logging.info("Image architecture set to {}".format(image_arch))
image_name = image_name or LTS_IMAGE_NAME
release = release or LTS_RELEASE
image_url = openstack_utils.find_ubuntu_image(
release=release,
arch='amd64')
arch=image_arch)
add_image(image_url,
glance_client=glance_client,
image_name=image_name)
image_name=image_name,
properties=properties)
def configure_external_s3_backend():
    """Set up Ceph-radosgw as an external S3 backend for Glance.

    Creates a radosgw S3 user and a test bucket, then configures the
    glance charm with the radosgw S3 endpoint and credentials, and waits
    for the model to settle.
    """
    logging.info("Creating a test S3 user and credentials for Glance")
    username, displayname = "zaza-glance-test", "Zaza Glance Test User"
    cmd = "radosgw-admin user create --uid='{}' --display-name='{}'".format(
        username, displayname
    )
    results = model.run_on_leader("ceph-mon", cmd)
    # radosgw-admin prints the new user record as JSON on stdout; the
    # first entry of "keys" holds the S3 access/secret credential pair.
    stdout = json.loads(results["stdout"])
    keys = stdout["keys"][0]
    access_key, secret_key = keys["access_key"], keys["secret_key"]
    logging.info("Getting S3 endpoint URL of Radosgw from Keystone")
    keystone_auth = openstack_utils.get_overcloud_auth()
    keystone_client = openstack_utils.get_keystone_client(keystone_auth)
    endpoint_url = keystone_client.session.get_endpoint(
        service_type="s3",
        interface="public",
        region="RegionOne",
    )
    logging.info("Creating a test S3 bucket for Glance")
    bucket_name = "zaza-glance-s3-test"
    s3_client = boto3.client(
        "s3",
        endpoint_url=endpoint_url,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    s3_client.create_bucket(Bucket=bucket_name)
    logging.info("Updating Glance configs with S3 endpoint information")
    model.set_application_config(
        "glance",
        {
            "s3-store-host": endpoint_url,
            "s3-store-access-key": access_key,
            "s3-store-secret-key": secret_key,
            "s3-store-bucket": bucket_name,
        },
    )
    # Let the config-changed hooks run before asserting target states.
    model.wait_for_agent_status()
    logging.info("Waiting for units to reach target states")
    model.wait_for_application_states(
        states={
            "glance": {
                "workload-status": "active",
                "workload-status-message": "Unit is ready",
            }
        }
    )
    model.block_until_all_units_idle()

View File

@@ -18,8 +18,10 @@
import logging
import zaza.openstack.utilities.openstack as openstack_utils
import boto3
import zaza.model as model
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
class GlanceTest(test_utils.OpenStackBaseTest):
@@ -44,7 +46,7 @@ class GlanceTest(test_utils.OpenStackBaseTest):
def test_411_set_disk_format(self):
"""Change disk format and check.
Change disk format and assert then change propagates to the correct
Change disk format and assert that change propagates to the correct
file and that services are restarted as a result
"""
# Expected default and alternate values
@@ -67,6 +69,50 @@ class GlanceTest(test_utils.OpenStackBaseTest):
{'image_format': {'disk_formats': ['qcow2']}},
['glance-api'])
    def test_412_image_conversion(self):
        """Check image-conversion config.

        When image-conversion config is enabled glance will convert images
        to raw format, this is only performed for interoperable image import
        docs.openstack.org/glance/train/admin/interoperable-image-import.html
        image conversion is done at server-side for better image handling
        """
        current_release = openstack_utils.get_os_release()
        bionic_stein = openstack_utils.get_os_release('bionic_stein')
        # The image-conversion option is only present on stein or later.
        if current_release < bionic_stein:
            self.skipTest('image-conversion config is supported since '
                          'bionic_stein or newer versions')
        # Temporarily enable image-conversion; restored on context exit.
        with self.config_change({'image-conversion': 'false'},
                                {'image-conversion': 'true'}):
            image_url = openstack_utils.find_cirros_image(arch='x86_64')
            # force_import drives the interoperable image import path,
            # which is where the server-side conversion takes place.
            image = openstack_utils.create_image(
                self.glance_client,
                image_url,
                'cirros-test-import',
                force_import=True)
            disk_format = self.glance_client.images.get(image.id).disk_format
            self.assertEqual('raw', disk_format)
    def test_900_restart_on_config_change(self):
        """Checking restart happens on config change.

        Flip the debug option and verify the expected services restart
        and the change lands in the expected config file.
        """
        # Config file affected by juju set config change
        conf_file = '/etc/glance/glance-api.conf'
        # Services which are expected to restart upon config change
        services = {'glance-api': conf_file}
        current_release = openstack_utils.get_os_release()
        bionic_stein = openstack_utils.get_os_release('bionic_stein')
        # Releases prior to stein also run glance-registry.
        if current_release < bionic_stein:
            services.update({'glance-registry': conf_file})
        # Make config change, check for service restarts
        logging.info('changing debug config')
        self.restart_on_changed_debug_oslo_config_file(
            conf_file,
            services)
def test_901_pause_resume(self):
"""Run pause and resume tests.
@@ -74,3 +120,105 @@ class GlanceTest(test_utils.OpenStackBaseTest):
they are started
"""
self.pause_resume(['glance-api'])
class GlanceCephRGWBackendTest(test_utils.OpenStackBaseTest):
    """Encapsulate glance tests using the Ceph RGW backend.

    It validates the Ceph RGW backend in glance, which uses the Swift API.
    """

    @classmethod
    def setUpClass(cls):
        """Run class setup for running glance tests."""
        super(GlanceCephRGWBackendTest, cls).setUpClass()
        # Authenticate via the ceph-radosgw relation so the Swift client
        # talks to the same store glance writes to.
        swift_session = openstack_utils.get_keystone_session_from_relation(
            'ceph-radosgw')
        cls.swift = openstack_utils.get_swift_session_client(
            swift_session)
        # cls.keystone_session is set up by the base class setUpClass.
        cls.glance_client = openstack_utils.get_glance_session_client(
            cls.keystone_session)

    def test_100_create_image(self):
        """Create an image and do a simple validation of it.

        The OpenStack Swift API is used to do the validation, since the Ceph
        Rados Gateway serves an API which is compatible with that.
        """
        image_name = 'zaza-ceph-rgw-image'
        openstack_utils.create_image(
            glance=self.glance_client,
            image_url=openstack_utils.find_cirros_image(arch='x86_64'),
            image_name=image_name,
            backend='swift')
        # Expect exactly one container to exist after the upload.
        headers, containers = self.swift.get_account()
        self.assertEqual(len(containers), 1)
        container_name = containers[0].get('name')
        headers, objects = self.swift.get_container(container_name)
        images = openstack_utils.get_images_by_name(
            self.glance_client,
            image_name)
        self.assertEqual(len(images), 1)
        image = images[0]
        # Sum the sizes of the objects whose names contain '<image-id>-'
        # and compare with the size glance reports for the image.
        total_bytes = 0
        for ob in objects:
            if '{}-'.format(image['id']) in ob['name']:
                total_bytes = total_bytes + int(ob['bytes'])
        logging.info(
            'Checking glance image size {} matches swift '
            'image size {}'.format(image['size'], total_bytes))
        self.assertEqual(image['size'], total_bytes)
        openstack_utils.delete_image(self.glance_client, image['id'])
class GlanceExternalS3Test(test_utils.OpenStackBaseTest):
    """Encapsulate glance tests using an external S3 backend."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running glance tests with S3 backend."""
        super(GlanceExternalS3Test, cls).setUpClass()
        cls.glance_client = openstack_utils.get_glance_session_client(
            cls.keystone_session
        )
        # Read back the S3 connection details previously configured on the
        # glance charm so the test can query the backend directly.
        configs = model.get_application_config("glance")
        cls.s3_store_host = configs["s3-store-host"]["value"]
        cls.s3_store_access_key = configs["s3-store-access-key"]["value"]
        cls.s3_store_secret_key = configs["s3-store-secret-key"]["value"]
        cls.s3_store_bucket = configs["s3-store-bucket"]["value"]

    def test_100_create_delete_image(self):
        """Create an image and do a simple validation of it.

        Validate the size of the image in both Glance API and actual S3 bucket.
        """
        image_name = "zaza-s3-test-image"
        openstack_utils.create_image(
            glance=self.glance_client,
            image_url=openstack_utils.find_cirros_image(arch="x86_64"),
            image_name=image_name,
            backend="s3",
        )
        images = openstack_utils.get_images_by_name(
            self.glance_client, image_name
        )
        self.assertEqual(len(images), 1)
        image = images[0]
        s3_client = boto3.client(
            "s3",
            endpoint_url=self.s3_store_host,
            aws_access_key_id=self.s3_store_access_key,
            aws_secret_access_key=self.s3_store_secret_key,
        )
        # The S3 store keys the object by the glance image id; compare the
        # object size reported by S3 with the size glance recorded.
        response = s3_client.head_object(
            Bucket=self.s3_store_bucket, Key=image["id"]
        )
        logging.info(
            "Checking glance image size {} matches S3 object's ContentLength "
            "{}".format(image["size"], response["ContentLength"])
        )
        self.assertEqual(image["size"], response["ContentLength"])
        openstack_utils.delete_image(self.glance_client, image["id"])

View File

@@ -0,0 +1,16 @@
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing glance-simplestreams-sync."""

View File

@@ -0,0 +1,79 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for configuring glance-simplestreams-sync."""
import logging
import tenacity
import pprint
import zaza.model as zaza_model
import zaza.openstack.utilities.generic as generic_utils
import zaza.openstack.utilities.openstack as openstack_utils
def _get_catalog():
    """Retrieve the Keystone service catalog.

    :returns: The raw Keystone service catalog.
    :rtype: List[Dict]
    :raises ValueError: If the token data carries no service catalog.
    """
    session = openstack_utils.get_overcloud_keystone_session()
    client = openstack_utils.get_keystone_session_client(session)
    token_data = client.tokens.get_token_data(session.get_token())
    token = token_data['token']
    if 'catalog' in token:
        return token['catalog']
    raise ValueError('catalog not in token data: "{}"'
                     .format(pprint.pformat(token_data)))
def sync_images():
    """Run image sync using an action.

    Execute an initial image sync using an action to ensure that the
    cloud is populated with images at the right point in time during
    deployment.

    :raises: Exception from the action run, re-raised after logging the
             Keystone service catalog for diagnosis.
    """
    logging.info("Synchronising images using glance-simplestreams-sync")
    catalog = None
    try:
        # Up to 3 attempts with exponential backoff (2s to 10s) to ride
        # over transient failures just after deployment.
        for attempt in tenacity.Retrying(
                stop=tenacity.stop_after_attempt(3),
                wait=tenacity.wait_exponential(
                    multiplier=1, min=2, max=10),
                reraise=True):
            with attempt:
                # Proactively retrieve the Keystone service catalog so that we
                # can log it in the event of a failure.
                catalog = _get_catalog()
                generic_utils.assertActionRanOK(
                    zaza_model.run_action_on_leader(
                        "glance-simplestreams-sync",
                        "sync-images",
                        raise_on_failure=True,
                        action_params={},
                    )
                )
    except Exception:
        logging.info('Contents of Keystone service catalog: "{}"'
                     .format(pprint.pformat(catalog)))
        raise

View File

@@ -0,0 +1,127 @@
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate glance-simplestreams-sync testing."""
import json
import logging
import requests
import tenacity
import zaza.model as zaza_model
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
@tenacity.retry(
    retry=tenacity.retry_if_result(lambda images: len(images) < 4),
    wait=tenacity.wait_fixed(6),  # interval between retries
    stop=tenacity.stop_after_attempt(100))  # retry times
def retry_image_sync(glance_client):
    """Wait for image sync with retry.

    Retries until glance lists at least four images or the attempt
    limit is reached.

    :param glance_client: Authenticated glanceclient
    :returns: Images currently known to glance
    :rtype: list
    """
    # convert generator to list
    return list(glance_client.images.list())
@tenacity.retry(
    retry=tenacity.retry_if_exception_type(json.decoder.JSONDecodeError),
    wait=tenacity.wait_fixed(10), reraise=True,
    stop=tenacity.stop_after_attempt(10))
def get_product_streams(url):
    """Get product streams json data with retry.

    :param url: URL of the product streams index document
    :type url: str
    :returns: Decoded product streams document
    :rtype: dict
    """
    # There is a race between the images being available in glance and any
    # metadata being written. Use tenacity to avoid this race.
    client = requests.session()
    json_data = client.get(url, verify=openstack_utils.get_cacert()).text
    return json.loads(json_data)
class GlanceSimpleStreamsSyncTest(test_utils.OpenStackBaseTest):
    """Glance Simple Streams Sync Test."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running glance simple streams sync tests."""
        super(GlanceSimpleStreamsSyncTest, cls).setUpClass()
        # dict of OS_* env vars
        overcloud_auth = openstack_utils.get_overcloud_auth()
        cls.keystone_client = openstack_utils.get_keystone_client(
            overcloud_auth)
        cls.glance_client = openstack_utils.get_glance_session_client(
            cls.keystone_session)

    def test_010_wait_for_image_sync(self):
        """Wait for images to be synced. Expect at least four."""
        # retry_image_sync keeps retrying until at least four images are
        # listed; a non-empty list is truthy.
        self.assertTrue(retry_image_sync(self.glance_client))

    def test_050_gss_permissions_regression_check_lp1611987(self):
        """Assert the intended file permissions on gss config files.

        refer: https://bugs.launchpad.net/bugs/1611987
        """
        file_paths = [
            '/etc/glance-simplestreams-sync/identity.yaml',
            '/etc/glance-simplestreams-sync/mirrors.yaml',
            '/var/log/glance-simplestreams-sync.log',
        ]
        expected_perms = '640'
        application = 'glance-simplestreams-sync'
        for unit in zaza_model.get_units(application):
            for file_path in file_paths:
                # stat -c %a prints only the octal permission bits.
                cmd = 'stat -c %a {}'.format(file_path)
                result = zaza_model.run_on_unit(unit.name, cmd, timeout=30)
                # {'Code': '', 'Stderr': '', 'Stdout': '644\n'}
                perms = result.get('Stdout', '').strip()
                self.assertEqual(perms, expected_perms)
                logging.debug(
                    'Permissions on {}: {}'.format(file_path, perms))

    def test_110_local_product_stream(self):
        """Verify that the local product stream is accessible and has data."""
        logging.debug('Checking local product streams...')
        expected_images = [
            'com.ubuntu.cloud:server:14.04:amd64',
            'com.ubuntu.cloud:server:16.04:amd64',
            'com.ubuntu.cloud:server:18.04:amd64',
            'com.ubuntu.cloud:server:20.04:amd64',
        ]
        uri = "streams/v1/auto.sync.json"

        # There is a race between the images being available in glance and the
        # metadata being written for each image. Use tenacity to avoid this
        # race and make the test idempotent.
        @tenacity.retry(
            retry=tenacity.retry_if_exception_type(
                (AssertionError, KeyError)
            ),
            wait=tenacity.wait_fixed(10), reraise=True,
            stop=tenacity.stop_after_attempt(25))
        def _check_local_product_streams(expected_images):
            # Refresh from catalog as URL may change if swift in use.
            ps_interface = self.keystone_client.service_catalog.url_for(
                service_type='product-streams', interface='publicURL'
            )
            url = "{}/{}".format(ps_interface, uri)
            logging.info('Retrieving product stream information'
                         ' from {}'.format(url))
            product_streams = get_product_streams(url)
            logging.debug(product_streams)
            images = product_streams["products"]
            for image in expected_images:
                self.assertIn(image, images)

        _check_local_product_streams(expected_images)
        logging.debug("Local product stream successful")

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing gnocchi."""

View File

@@ -0,0 +1,69 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for Gnocchi tests."""
import logging
import zaza.model as model
import zaza.openstack.utilities.openstack as openstack_utils
def configure_s3_backend():
    """Inject S3 parameters from Swift for Gnocchi config.

    Looks up the public S3-compatible endpoint in the Keystone service
    catalog, creates EC2-style credentials for the current user/project,
    configures the gnocchi charm with them and waits for the model to
    settle.

    :raises RuntimeError: If no public S3 endpoint is found in the catalog.
    """
    session = openstack_utils.get_overcloud_keystone_session()
    ks_client = openstack_utils.get_keystone_session_client(session)

    logging.info('Retrieving S3 connection data from Swift')
    token_data = ks_client.tokens.get_token_data(session.get_token())
    project_id = token_data['token']['project']['id']
    user_id = token_data['token']['user']['id']

    # Store URL to service providing S3 compatible API
    s3_region = None
    s3_endpoint = None
    for entry in token_data['token']['catalog']:
        if entry['type'] == 's3':
            for endpoint in entry['endpoints']:
                if endpoint['interface'] == 'public':
                    s3_region = endpoint['region']
                    s3_endpoint = endpoint['url']
    if s3_endpoint is None:
        # Fail with a clear message rather than a NameError further down.
        raise RuntimeError('No public S3 endpoint found in service catalog')

    # Create AWS compatible application credentials in Keystone
    ec2_creds = ks_client.ec2.create(user_id, project_id)

    logging.info('Changing Gnocchi charm config to connect to S3')
    model.set_application_config(
        'gnocchi',
        {'s3-endpoint-url': s3_endpoint,
         's3-region-name': s3_region,
         's3-access-key-id': ec2_creds.access,
         's3-secret-access-key': ec2_creds.secret}
    )
    logging.info('Waiting for units to execute config-changed hook')
    model.wait_for_agent_status()
    logging.info('Waiting for units to reach target states')
    model.wait_for_application_states(
        states={
            'gnocchi': {
                # Key fixed from 'workload-status-' (trailing dash), which
                # previously meant the active status was never asserted.
                'workload-status': 'active',
                'workload-status-message': 'Unit is ready'
            },
            'ceilometer': {
                'workload-status': 'blocked',
                'workload-status-message': 'Run the ' +
                'ceilometer-upgrade action on the leader ' +
                'to initialize ceilometer and gnocchi'
            }
        }
    )
    model.block_until_all_units_idle()

View File

@@ -0,0 +1,144 @@
#!/usr/bin/env python3
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Gnocchi testing."""
import base64
import boto3
import logging
import pprint
from gnocchiclient.v1 import client as gnocchi_client
import zaza.model as model
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities as utilities
import zaza.openstack.utilities.openstack as openstack_utils
class GnocchiTest(test_utils.OpenStackBaseTest):
    """Encapsulate Gnocchi tests."""

    @property
    def services(self):
        """Return a list of services for the selected OpenStack release.

        :rtype: list of str
        """
        return ['haproxy', 'gnocchi-metricd', 'apache2']

    def test_200_api_connection(self):
        """Simple api calls to check service is up and responding."""
        logging.info('Instantiating gnocchi client...')
        overcloud_auth = openstack_utils.get_overcloud_auth()
        keystone = openstack_utils.get_keystone_client(overcloud_auth)
        gnocchi_ep = keystone.service_catalog.url_for(
            service_type='metric',
            interface='publicURL'
        )
        # Point the client at the deployed endpoint explicitly instead of
        # relying on its own endpoint discovery.
        gnocchi = gnocchi_client.Client(
            session=openstack_utils.get_overcloud_keystone_session(),
            adapter_options={
                'endpoint_override': gnocchi_ep,
            }
        )

        logging.info('Checking api functionality...')
        assert(gnocchi.status.get() != [])

    def test_910_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped then resume and check
        they are started.
        """
        with self.pause_resume(self.services):
            logging.info("Testing pause and resume")
class GnocchiS3Test(test_utils.OpenStackBaseTest):
    """Test Gnocchi for S3 storage backend."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests.

        Extracts user/project ids from the current token, locates the
        public S3-compatible endpoint in the Keystone catalog and creates
        EC2-style credentials for use by the tests.
        """
        super(GnocchiS3Test, cls).setUpClass()
        session = openstack_utils.get_overcloud_keystone_session()
        ks_client = openstack_utils.get_keystone_session_client(session)
        # Get token data so we can glean our user_id and project_id
        token_data = ks_client.tokens.get_token_data(session.get_token())
        project_id = token_data['token']['project']['id']
        user_id = token_data['token']['user']['id']
        # Store URL to service providing S3 compatible API
        for entry in token_data['token']['catalog']:
            if entry['type'] == 's3':
                for endpoint in entry['endpoints']:
                    if endpoint['interface'] == 'public':
                        cls.s3_region = endpoint['region']
                        cls.s3_endpoint = endpoint['url']
        # Create AWS compatible application credentials in Keystone
        cls.ec2_creds = ks_client.ec2.create(user_id, project_id)

    def test_s3_list_gnocchi_buckets(self):
        """Verify that the gnocchi buckets were created in the S3 backend."""
        kwargs = {
            'region_name': self.s3_region,
            'aws_access_key_id': self.ec2_creds.access,
            'aws_secret_access_key': self.ec2_creds.secret,
            'endpoint_url': self.s3_endpoint,
            # self.cacert is provided by OpenStackBaseTest — TODO confirm.
            'verify': self.cacert,
        }
        s3_client = boto3.client('s3', **kwargs)
        bucket_names = ['gnocchi-measure', 'gnocchi-aggregates']
        # Validate their presence
        bucket_list = s3_client.list_buckets()
        logging.info(pprint.pformat(bucket_list))
        # NOTE: the previous implementation constructed an AssertionError
        # without raising it and iterated the listed buckets instead of
        # the expected ones, so a missing gnocchi bucket silently passed.
        # Assert that every expected bucket is present in the backend.
        present = {bkt['Name'] for bkt in bucket_list['Buckets']}
        for expected in bucket_names:
            self.assertIn(expected, present,
                          'Bucket "{}" not found'.format(expected))
class GnocchiExternalCATest(test_utils.OpenStackBaseTest):
    """Test Gnocchi for external root CA config option."""

    def test_upload_external_cert(self):
        """Verify that the external CA is uploaded correctly."""
        logging.info('Changing value for trusted-external-ca-cert.')
        ca_cert_option = 'trusted-external-ca-cert'
        # Generate a throwaway certificate for a test hostname; the charm
        # expects the config value base64-encoded.
        ppk, cert = utilities.cert.generate_cert('gnocchi_test.ci.local')
        b64_cert = base64.b64encode(cert).decode()
        config = {
            ca_cert_option: b64_cert,
        }
        model.set_application_config(
            'gnocchi',
            config
        )
        model.block_until_all_units_idle()
        files = [
            '/usr/local/share/ca-certificates/gnocchi-external.crt',
            '/etc/ssl/certs/gnocchi-external.pem',
        ]
        for file in files:
            logging.info("Validating that {} is created.".format(file))
            # PEM files carry 'CERTIFICATE' in their header/footer lines,
            # so its presence indicates the cert was written out.
            model.block_until_file_has_contents('gnocchi', file, 'CERTIFICATE')
            logging.info("Found {} successfully.".format(file))

View File

@@ -0,0 +1,15 @@
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing hacluster."""

View File

@@ -0,0 +1,196 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HACluster testing."""
import logging
import os
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.configure.hacluster
import zaza.utilities.juju as juju_utils
class HaclusterBaseTest(test_utils.OpenStackBaseTest):
    """Base class for hacluster tests."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running hacluster tests."""
        super(HaclusterBaseTest, cls).setUpClass()
        # VIP under test comes from the environment; may be None when
        # TEST_VIP00 is unset.
        cls.vip = os.environ.get("TEST_VIP00")
class HaclusterTest(HaclusterBaseTest):
    """hacluster tests."""

    def test_900_action_cleanup(self):
        """The services can be cleaned up."""
        # NOTE(review): relies on zaza.model being reachable as an attribute
        # of the zaza package imported at module level — confirm the import.
        zaza.model.run_action_on_leader(
            self.application_name,
            'cleanup',
            raise_on_failure=True)

    def test_910_pause_and_resume(self):
        """The services can be paused and resumed."""
        with self.pause_resume([]):
            logging.info("Testing pause resume")

    def _toggle_maintenance_and_wait(self, expected):
        """Configure cluster maintenance-mode.

        :param expected: expected value to set maintenance-mode
                         ('true' or 'false', as strings)
        :type expected: str
        """
        config = {"maintenance-mode": expected}
        logging.info("Setting config to {}".format(config))
        zaza.model.set_application_config(self.application_name, config)
        if expected == 'true':
            _states = {"hacluster": {
                "workload-status": "maintenance",
                "workload-status-message": "Pacemaker in maintenance mode"}}
        else:
            _states = {"hacluster": {
                "workload-status": "active",
                "workload-status-message": "Unit is ready and clustered"}}
        zaza.model.wait_for_application_states(states=_states)
        logging.debug('OK')

    def test_920_put_in_maintenance(self):
        """Put pacemaker in maintenance mode."""
        logging.debug('Setting cluster in maintenance mode')
        self._toggle_maintenance_and_wait('true')
        # Bring the cluster back out of maintenance so later tests start
        # from a clean state.
        self._toggle_maintenance_and_wait('false')
class HaclusterScaleBackAndForthTest(HaclusterBaseTest):
    """hacluster tests scaling back and forth."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running hacluster tests."""
        super(HaclusterScaleBackAndForthTest, cls).setUpClass()
        # Names of the principle application and the hacluster subordinate
        # charm come from the tests_options section of the test config.
        test_config = cls.test_config['tests_options']['hacluster']
        cls._principle_app_name = test_config['principle-app-name']
        cls._hacluster_charm_name = test_config['hacluster-charm-name']

    def test_930_scaleback(self):
        """Remove one unit, recalculate quorum and re-add one unit.

        NOTE(lourot): before lp:1400481 was fixed, the corosync ring wasn't
        recalculated when removing units. So within a cluster of 3 units,
        removing a unit and re-adding one led to a situation where corosync
        considers having 3 nodes online out of 4, instead of just 3 out of 3.
        This test covers this scenario.
        """
        # The test expects exactly three principle units to begin with.
        principle_units = sorted(zaza.model.get_status().applications[
            self._principle_app_name]['units'].keys())
        self.assertEqual(len(principle_units), 3)
        surviving_principle_unit = principle_units[0]
        doomed_principle_unit = principle_units[1]
        surviving_hacluster_unit = juju_utils.get_subordinate_units(
            [surviving_principle_unit],
            charm_name=self._hacluster_charm_name)[0]
        doomed_hacluster_unit = juju_utils.get_subordinate_units(
            [doomed_principle_unit],
            charm_name=self._hacluster_charm_name)[0]

        logging.info('Pausing unit {}'.format(doomed_hacluster_unit))
        zaza.model.run_action(
            doomed_hacluster_unit,
            'pause',
            raise_on_failure=True)

        logging.info('Removing {}'.format(doomed_principle_unit))
        zaza.model.destroy_unit(
            self._principle_app_name,
            doomed_principle_unit,
            wait_disappear=True)

        logging.info('Waiting for model to settle')
        zaza.model.block_until_unit_wl_status(surviving_hacluster_unit,
                                              'blocked')
        # NOTE(lourot): the surviving principle units (usually keystone units)
        # aren't guaranteed to be blocked, so we don't validate that here.
        zaza.model.block_until_all_units_idle()

        # At this point the corosync ring hasn't been updated yet, so it should
        # still remember the deleted unit:
        self.__assert_some_corosync_nodes_are_offline(surviving_hacluster_unit)

        logging.info('Updating corosync ring')
        hacluster_app_name = zaza.model.get_unit_from_name(
            surviving_hacluster_unit).application
        zaza.model.run_action_on_leader(
            hacluster_app_name,
            'update-ring',
            action_params={'i-really-mean-it': True},
            raise_on_failure=True)

        # At this point if the corosync ring has been properly updated, there
        # shouldn't be any trace of the deleted unit anymore:
        self.__assert_all_corosync_nodes_are_online(surviving_hacluster_unit)

        logging.info('Re-adding an hacluster unit')
        zaza.model.add_unit(self._principle_app_name, wait_appear=True)

        logging.info('Waiting for model to settle')
        # NOTE(lourot): the principle charm may remain blocked here. This seems
        # to happen often when it is keystone and has a mysql-router as other
        # subordinate charm. The keystone units seems to often remain blocked
        # with 'Database not initialised'. This is not the hacluster charm's
        # fault and this is why we don't validate here that the entire model
        # goes back to active/idle.
        zaza.model.block_until_unit_wl_status(surviving_hacluster_unit,
                                              'active')
        zaza.model.block_until_all_units_idle()

        # Because of lp:1874719 the corosync ring may show a mysterious offline
        # 'node1' node. We clean up the ring by re-running the 'update-ring'
        # action:
        logging.info('Updating corosync ring - workaround for lp:1874719')
        zaza.model.run_action_on_leader(
            hacluster_app_name,
            'update-ring',
            action_params={'i-really-mean-it': True},
            raise_on_failure=True)

        # At this point the corosync ring should not contain any offline node:
        self.__assert_all_corosync_nodes_are_online(surviving_hacluster_unit)

    def __assert_some_corosync_nodes_are_offline(self, hacluster_unit):
        """Assert `crm status` on the given unit reports an OFFLINE node."""
        logging.info('Checking that corosync considers at least one node to '
                     'be offline')
        output = self._get_crm_status(hacluster_unit)
        self.assertIn('OFFLINE', output,
                      "corosync should list at least one offline node")

    def __assert_all_corosync_nodes_are_online(self, hacluster_unit):
        """Assert `crm status` on the given unit reports no OFFLINE node."""
        logging.info('Checking that corosync considers all nodes to be online')
        output = self._get_crm_status(hacluster_unit)
        self.assertNotIn('OFFLINE', output,
                         "corosync shouldn't list any offline node")

    @staticmethod
    def _get_crm_status(hacluster_unit):
        """Run `sudo crm status` on a unit and return its stdout.

        :param hacluster_unit: Name of the unit to run the command on
        :type hacluster_unit: str
        :returns: Stripped stdout of `crm status`
        :rtype: str
        :raises zaza.model.CommandRunFailed: If the command exits non-zero.
        """
        cmd = 'sudo crm status'
        result = zaza.model.run_on_unit(hacluster_unit, cmd)
        code = result.get('Code')
        if code != '0':
            raise zaza.model.CommandRunFailed(cmd, result)
        output = result.get('Stdout').strip()
        logging.debug('crm output received: {}'.format(output))
        return output

View File

@@ -0,0 +1,17 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing heat."""

View File

@@ -0,0 +1,256 @@
#!/usr/bin/env python3
#
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate heat testing."""
import logging
import json
import os
import subprocess
from urllib import parse as urlparse
from heatclient.common import template_utils
import zaza.model
import zaza.openstack.charm_tests.nova.utils as nova_utils
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.charm_lifecycle.utils as charm_lifecycle_utils
# Resource and name constants
IMAGE_NAME = 'cirros'  # glance image the stack template boots (see test_410)
STACK_NAME = 'hello_world'  # name of the heat stack created by the tests
RESOURCE_TYPE = 'server'  # heat resource queried for the nova server id
TEMPLATES_PATH = 'files'  # template dir, relative to the charm bundle dir
FLAVOR_NAME = 'm1.tiny'  # nova flavor name (unused in the tests shown here)
class HeatBasicDeployment(test_utils.OpenStackBaseTest):
    """Encapsulate Heat tests."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running Heat tests."""
        super(HeatBasicDeployment, cls).setUpClass()
        cls.application = 'heat'
        cls.keystone_session = openstack_utils.get_overcloud_keystone_session()
        cls.heat_client = openstack_utils.get_heat_session_client(
            cls.keystone_session)
        cls.glance_client = openstack_utils.get_glance_session_client(
            cls.keystone_session)
        cls.nova_client = openstack_utils.get_nova_session_client(
            cls.keystone_session)

    @property
    def services(self):
        """Return a list services for the selected OpenStack release.

        :returns: List of services
        :rtype: [str]
        """
        services = ['heat-api', 'heat-api-cfn', 'heat-engine']
        return services

    def test_100_domain_setup(self):
        """Run required action for a working Heat unit."""
        # Action is REQUIRED to run for a functioning heat deployment
        logging.info('Running domain-setup action on heat unit...')
        unit = zaza.model.get_units(self.application_name)[0]
        zaza.model.block_until_unit_wl_status(unit.entity_id, "active")
        zaza.model.run_action(unit.entity_id, "domain-setup")
        zaza.model.block_until_unit_wl_status(unit.entity_id, "active")

    def test_400_heat_resource_types_list(self):
        """Check default resource list behavior and confirm functionality."""
        logging.info('Checking default heat resource list...')
        types = self.heat_client.resource_types.list()
        self.assertIsInstance(types, list, "Resource type is not a list!")
        self.assertGreater(len(types), 0, "Resource type list len is zero")

    def test_410_heat_stack_create_delete(self):
        """Create stack, confirm nova compute resource, delete stack."""
        # Verify new image name
        images_list = list(self.glance_client.images.list())
        self.assertEqual(images_list[0].name, IMAGE_NAME,
                         "glance image create failed or unexpected")

        # Create a heat stack from a heat template, verify its status
        logging.info('Creating heat stack...')
        t_name = 'hot_hello_world.yaml'
        if (openstack_utils.get_os_release() <
                openstack_utils.get_os_release('xenial_queens')):
            os_release = 'icehouse'
        else:
            os_release = 'queens'

        # Get location of template files in charm-heat
        bundle_path = charm_lifecycle_utils.BUNDLE_DIR
        if bundle_path[-1:] == "/":
            bundle_path = bundle_path[0:-1]
        file_rel_path = os.path.join(os.path.dirname(bundle_path),
                                     TEMPLATES_PATH, os_release, t_name)
        file_abs_path = os.path.abspath(file_rel_path)
        t_url = urlparse.urlparse(file_abs_path, scheme='file').geturl()
        logging.info('template url: {}'.format(t_url))

        r_req = self.heat_client.http_client
        t_files, template = template_utils.get_template_contents(t_url, r_req)
        env_files, env = template_utils.process_environment_and_files(
            env_path=None)

        fields = {
            'stack_name': STACK_NAME,
            'timeout_mins': '15',
            'disable_rollback': False,
            'parameters': {
                'admin_pass': 'Ubuntu',
                'key_name': nova_utils.KEYPAIR_NAME,
                'image': IMAGE_NAME
            },
            'template': template,
            'files': dict(list(t_files.items()) + list(env_files.items())),
            'environment': env
        }

        # Create the stack
        try:
            stack = self.heat_client.stacks.create(**fields)
            logging.info('Stack data: {}'.format(stack))
            stack_id = stack['stack']['id']
            logging.info('Creating new stack, ID: {}'.format(stack_id))
        except Exception as e:
            # Generally, an api or cloud config error if this is hit.
            msg = 'Failed to create heat stack: {}'.format(e)
            self.fail(msg)

        # Confirm stack reaches COMPLETE status.
        # /!\ Heat stacks reach a COMPLETE status even when nova cannot
        # find resources (a valid hypervisor) to fit the instance, in
        # which case the heat stack self-deletes! Confirm anyway...
        openstack_utils.resource_reaches_status(self.heat_client.stacks,
                                                stack_id,
                                                expected_status="COMPLETE",
                                                msg="Stack status wait")

        # List stack
        stacks = list(self.heat_client.stacks.list())
        logging.info('All stacks: {}'.format(stacks))

        # Get stack information
        try:
            stack = self.heat_client.stacks.get(STACK_NAME)
        except Exception as e:
            # Generally, a resource availability issue if this is hit.
            msg = 'Failed to get heat stack: {}'.format(e)
            self.fail(msg)

        # Confirm stack name.
        logging.info('Expected, actual stack name: {}, '
                     '{}'.format(STACK_NAME, stack.stack_name))
        self.assertEqual(stack.stack_name, STACK_NAME,
                         'Stack name mismatch, '
                         '{} != {}'.format(STACK_NAME, stack.stack_name))

        # Confirm existence of a heat-generated nova compute resource
        logging.info('Confirming heat stack resource status...')
        resource = self.heat_client.resources.get(STACK_NAME, RESOURCE_TYPE)
        server_id = resource.physical_resource_id
        self.assertTrue(server_id, "Stack failed to spawn a compute resource.")

        # Confirm nova instance reaches ACTIVE status
        openstack_utils.resource_reaches_status(self.nova_client.servers,
                                                server_id,
                                                expected_status="ACTIVE",
                                                msg="nova instance")
        logging.info('Nova instance reached ACTIVE status')

        # Delete stack
        logging.info('Deleting heat stack...')
        openstack_utils.delete_resource(self.heat_client.stacks,
                                        STACK_NAME, msg="heat stack")

    def test_500_auth_encryption_key_same_on_units(self):
        """Test the auth_encryption_key in heat.conf is same on all units."""
        logging.info("Checking the 'auth_encryption_key' is the same on "
                     "all units.")
        output, ret = self._run_arbitrary(
            "--application heat "
            "--format json "
            "grep auth_encryption_key /etc/heat/heat.conf")
        if ret:
            msg = "juju run error: ret: {}, output: {}".format(ret, output)
            self.assertEqual(ret, 0, msg)
        output = json.loads(output)
        keys = {}
        for r in output:
            k = r['Stdout'].split('=')[1].strip()
            keys[r['UnitId']] = k
        # see if keys are different
        ks = set(keys.values())
        # Bug fix: the failure message used to format a bare generator
        # expression (printing its repr); join the per-unit keys explicitly.
        self.assertEqual(
            len(ks), 1,
            "'auth_encryption_key' is not identical on every unit: "
            "{}".format(", ".join("{}={}".format(unit, key)
                                  for unit, key in keys.items())))

    @staticmethod
    def _run_arbitrary(command, timeout=300):
        """Run an arbitrary command (as root), but not necessarily on a unit.

        (Otherwise the self.run(...) command could have been used for the
        unit.)

        :param command: The command to run.
        :type command: str
        :param timeout: Seconds to wait before timing out.
        :type timeout: int
        :raises: subprocess.CalledProcessError.
        :returns: A pair containing the output of the command and exit value
        :rtype: (str, int)
        """
        cmd = ['juju', 'run', '--timeout', "{}s".format(timeout),
               ] + command.split()
        p = subprocess.Popen(
            cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        # On failure report stderr instead of (likely empty) stdout.
        output = stdout if p.returncode == 0 else stderr
        return output.decode('utf8').strip(), p.returncode

    def test_900_heat_restart_on_config_change(self):
        """Verify the specified services are restarted when config changes."""
        logging.info('Testing restart on configuration change')

        # Expected default and alternate values
        set_default = {'use-syslog': 'False'}
        set_alternate = {'use-syslog': 'True'}

        # Config file affected by juju set config change
        conf_file = '/etc/heat/heat.conf'

        # Make config change, check for service restarts
        # In Amulet we waited 30 seconds...do we still need to?
        logging.info('Making configuration change')
        self.restart_on_changed(
            conf_file,
            set_default,
            set_alternate,
            None,
            None,
            self.services)

    def test_910_pause_and_resume(self):
        """Run services pause and resume tests."""
        logging.info('Checking pause and resume actions...')
        with self.pause_resume(self.services):
            logging.info("Testing pause resume")

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing ironic."""

View File

@@ -0,0 +1,184 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for configuring ironic."""
import copy
import os
import tenacity
import zaza.openstack.charm_tests.glance.setup as glance_setup
import zaza.openstack.utilities.openstack as openstack_utils
from zaza.openstack.utilities import (
cli as cli_utils,
)
import zaza.model as zaza_model
# Baremetal flavor definitions, keyed by flavor name.  'flavorid', 'ram',
# 'disk' and 'vcpus' are the standard nova flavor fields; 'properties' maps a
# custom placement resource class (CUSTOM_BAREMETAL*) to 1 so the flavor is
# scheduled onto a matching Ironic node (create_bm_flavors() additionally
# zeroes the standard resource classes).
FLAVORS = {
    'bm1.small': {
        'flavorid': 2,
        'ram': 2048,
        'disk': 20,
        'vcpus': 1,
        'properties': {
            "resources:CUSTOM_BAREMETAL1_SMALL": 1,
        },
    },
    'bm1.medium': {
        'flavorid': 3,
        'ram': 4096,
        'disk': 40,
        'vcpus': 2,
        'properties': {
            "resources:CUSTOM_BAREMETAL1_MEDIUM": 1,
        },
    },
    'bm1.large': {
        'flavorid': 4,
        'ram': 8192,
        'disk': 40,
        'vcpus': 4,
        'properties': {
            "resources:CUSTOM_BAREMETAL1_LARGE": 1,
        },
    },
    'bm1.tempest': {
        'flavorid': 6,
        'ram': 256,
        'disk': 1,
        'vcpus': 1,
        'properties': {
            "resources:CUSTOM_BAREMETAL1_TEMPEST": 1,
        },
    },
    'bm2.tempest': {
        'flavorid': 7,
        'ram': 512,
        'disk': 1,
        'vcpus': 1,
        'properties': {
            "resources:CUSTOM_BAREMETAL2_TEMPEST": 1,
        },
    },
}
def _add_image(url, image_name, backend="swift",
               disk_format="raw", container_format="bare"):
    """Upload an image to glance, retrying up to three times on failure."""
    retryer = tenacity.Retrying(
        stop=tenacity.stop_after_attempt(3),
        reraise=True)
    # A Retrying object is callable: it invokes the wrapped function and
    # re-runs it according to the configured retry policy.
    retryer(
        glance_setup.add_image,
        url,
        image_name=image_name,
        backend=backend,
        disk_format=disk_format,
        container_format=container_format)
def add_ironic_deployment_image(initrd_url=None, kernel_url=None):
    """Add Ironic deploy images to glance.

    :param initrd_url: URL where the ari image resides
    :type initrd_url: str
    :param kernel_url: URL where the aki image resides
    :type kernel_url: str
    :raises ValueError: when either URL is neither supplied nor available
        from the TEST_IRONIC_DEPLOY_INITRD / TEST_IRONIC_DEPLOY_VMLINUZ
        environment variables.
    """
    initrd_url = initrd_url or os.environ.get(
        'TEST_IRONIC_DEPLOY_INITRD', None)
    kernel_url = kernel_url or os.environ.get(
        'TEST_IRONIC_DEPLOY_VMLINUZ', None)
    if not (initrd_url and kernel_url):
        raise ValueError("Missing required deployment image URLs")
    base_name = 'ironic-deploy'
    _add_image(
        initrd_url,
        "{}-initrd".format(base_name),
        backend="swift",
        disk_format="ari",
        container_format="ari")
    _add_image(
        kernel_url,
        "{}-vmlinuz".format(base_name),
        backend="swift",
        disk_format="aki",
        container_format="aki")
def add_ironic_os_image(image_url=None):
    """Upload the operating system images built for bare metal deployments.

    :param image_url: URL where the image resides
    :type image_url: str
    :raises ValueError: when no URL is supplied and TEST_IRONIC_RAW_BM_IMAGE
        is not set in the environment.
    """
    image_url = image_url or os.environ.get('TEST_IRONIC_RAW_BM_IMAGE', None)
    if image_url is None:
        raise ValueError("Missing image_url")
    _add_image(
        image_url,
        "baremetal-ubuntu-image",
        backend="swift",
        disk_format="raw",
        container_format="bare")
def set_temp_url_secret():
    """Run the set-temp-url-secret action on the ironic-conductor leader.

    This is needed if direct boot method is enabled.
    """
    zaza_model.run_action_on_leader(
        'ironic-conductor',
        'set-temp-url-secret',
        action_params=dict())
def create_bm_flavors(nova_client=None):
    """Create any baremetal flavors missing from nova.

    :param nova_client: Authenticated nova client; one is created from the
        overcloud keystone session when not supplied.
    :type nova_client: novaclient.v2.client.Client
    """
    if not nova_client:
        keystone_session = openstack_utils.get_overcloud_keystone_session()
        nova_client = openstack_utils.get_nova_session_client(
            keystone_session)
    cli_utils.setup_logging()
    # A set gives O(1) membership tests below (was a per-flavor list scan).
    existing_names = {flavor.name for flavor in nova_client.flavors.list()}
    # Disable scheduling based on standard flavor properties; placement then
    # schedules purely on the CUSTOM_BAREMETAL* resource class.
    default_properties = {
        "resources:VCPU": 0,
        "resources:MEMORY_MB": 0,
        "resources:DISK_GB": 0,
    }
    for name, spec in FLAVORS.items():
        if name in existing_names:
            continue
        bm_flavor = nova_client.flavors.create(
            name=name,
            ram=spec['ram'],
            vcpus=spec['vcpus'],
            disk=spec['disk'],
            flavorid=spec['flavorid'])
        # Merge via dict unpacking so the module-level defaults are never
        # mutated (previously achieved with an unnecessary deepcopy).
        bm_flavor.set_keys({**default_properties, **spec["properties"]})

View File

@@ -0,0 +1,83 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate ironic testing."""
import logging
import ironicclient.client as ironic_client
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
def _get_ironic_client(ironic_api_version="1.58"):
    """Build an ironic client on top of the overcloud keystone session.

    :param ironic_api_version: Ironic microversion to negotiate.
    :type ironic_api_version: str
    :returns: Authenticated ironic client.
    """
    session = openstack_utils.get_overcloud_keystone_session()
    return ironic_client.Client(
        1, session=session, os_ironic_api_version=ironic_api_version)
class IronicTest(test_utils.OpenStackBaseTest):
    """Run Ironic specific tests."""

    # Services exercised by the restart/pause tests below.
    _SERVICES = ['ironic-api']

    def test_110_catalog_endpoints(self):
        """Verify that the endpoints are present in the catalog."""
        overcloud_auth = openstack_utils.get_overcloud_auth()
        keystone_client = openstack_utils.get_keystone_client(
            overcloud_auth)
        actual_endpoints = keystone_client.service_catalog.get_endpoints()
        # The 'baremetal' service must be registered on all three interfaces.
        actual_interfaces = [endpoint['interface'] for endpoint in
                             actual_endpoints["baremetal"]]
        for expected_interface in ('internal', 'admin', 'public'):
            assert(expected_interface in actual_interfaces)

    def test_400_api_connection(self):
        """Simple api calls to check service is up and responding."""
        ironic = _get_ironic_client()
        logging.info('listing conductors')
        conductors = ironic.conductor.list()
        assert(len(conductors) > 0)
        # By default, only IPMI HW type is enabled. iDrac and Redfish
        # can optionally be enabled
        drivers = ironic.driver.list()
        driver_names = [drv.name for drv in drivers]
        expected = ['intel-ipmi', 'ipmi']
        for exp in expected:
            assert(exp in driver_names)
        # Exactly the two IPMI drivers above are expected to be enabled.
        assert(len(driver_names) == 2)

    def test_900_restart_on_config_change(self):
        """Checking restart happens on config change.

        Change debug mode and assert that change propagates to the correct
        file and that services are restarted as a result
        """
        self.restart_on_changed_debug_oslo_config_file(
            '/etc/ironic/ironic.conf', self._SERVICES)

    def test_910_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped then resume and check
        they are started
        """
        # Unconditional early return: the test is disabled until LP: #1886202
        # is resolved; the 'with' block below is intentionally unreachable.
        logging.info('Skipping pause resume test LP: #1886202...')
        return
        with self.pause_resume(self._SERVICES):
            logging.info("Testing pause resume")

View File

@@ -0,0 +1,21 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing keystone-kerberos."""
class KerberosConfigurationError(Exception):
    """Raised when configuring the Kerberos test server fails."""

View File

@@ -0,0 +1,239 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for keystone-kerberos tests."""
import logging
import tempfile
import zaza.model
from zaza.openstack.utilities import openstack as openstack_utils
from zaza.openstack.charm_tests.kerberos import KerberosConfigurationError
def get_unit_full_hostname(unit_name):
    """Return the FQDN of the first unit of the given application.

    :param unit_name: Application whose first unit is queried.
    :type unit_name: str
    :returns: Output of 'hostname -f' on that unit, or None when the
        application has no units.
    :rtype: str
    """
    for unit in zaza.model.get_units(unit_name):
        result = zaza.model.run_on_unit(unit.entity_id, 'hostname -f')
        return result['Stdout'].rstrip()
def add_empty_resource_file_to_keystone_kerberos():
    """Attach an empty keytab resource so keystone-kerberos can settle."""
    logging.info('Attaching an empty keystone keytab to the keystone-kerberos'
                 ' unit')
    keytab_path = '/tmp/empty.keytab'
    # Create (or truncate to) an empty placeholder file.
    with open(keytab_path, 'w'):
        pass
    zaza.model.attach_resource(
        'keystone-kerberos', 'keystone_keytab', keytab_path)
    logging.info('Waiting for keystone-kerberos unit to be active and idle')
    first_unit = zaza.model.get_units('keystone-kerberos')[0].name
    zaza.model.block_until_unit_wl_status(first_unit, "active")
    zaza.model.block_until_all_units_idle()
def add_dns_entry(kerberos_hostname="kerberos.testubuntu.com"):
    """Add a dns entry in /etc/hosts for the kerberos test server.

    :param kerberos_hostname: FQDN of Kerberos server
    :type kerberos_hostname: string
    """
    logging.info('Retrieving kerberos IP and hostname')
    kerberos_ip = zaza.model.get_app_ips("kerberos-server")[0]
    # Insert the entry just before the 'localhost' line.
    cmd = "sudo sed -i '/localhost/i\\{}\t{}' /etc/hosts"\
        .format(kerberos_ip, kerberos_hostname)
    for app_name in ('keystone', 'ubuntu-test-host'):
        logging.info('Adding dns entry to the {} unit'.format(app_name))
        target_unit = zaza.model.get_units(app_name)[0]
        zaza.model.run_on_unit(target_unit.entity_id, cmd)
def configure_keystone_service_in_kerberos():
    """Configure the keystone service in Kerberos.

    A principal needs to be added to the kerberos server to get a keytab for
    this service. The keytab is used for the authentication of the keystone
    service.

    :raises KerberosConfigurationError: if any kadmin/chmod command writes
        to stderr on the kerberos-server unit.
    """
    logging.info('Configure keystone service in Kerberos')
    unit = zaza.model.get_units('kerberos-server')[0]
    keystone_hostname = get_unit_full_hostname('keystone')
    commands = ['sudo kadmin.local -q "addprinc -randkey -clearpolicy '
                'HTTP/{}"'.format(keystone_hostname),
                'sudo kadmin.local -q "ktadd '
                '-k /home/ubuntu/keystone.keytab '
                'HTTP/{}"'.format(keystone_hostname),
                'sudo chmod 777 /home/ubuntu/keystone.keytab']
    for command in commands:
        logging.info(
            'Sending command to the kerberos-server: {}'.format(command))
        result = zaza.model.run_on_unit(unit.name, command)
        if result['Stderr']:
            # Fail loudly: the error used to be logged and swallowed, which
            # left the deployment half-configured and later tests failing
            # with unrelated symptoms.  (Also fixes 'occured' typo.)
            logging.error('An error occurred: {}'.format(result['Stderr']))
            raise KerberosConfigurationError(result['Stderr'])
        elif result['Stdout']:
            logging.info('Stdout: {}'.format(result['Stdout']))
def retrieve_and_attach_keytab():
    """Retrieve and attach the keytab to the keystone-kerberos unit.

    Copies /home/ubuntu/keystone.keytab (created on the kerberos-server by
    configure_keystone_service_in_kerberos) to a local temporary directory,
    then attaches it as the 'keystone_keytab' charm resource.
    """
    kerberos_server = zaza.model.get_units('kerberos-server')[0]
    dump_file = "keystone.keytab"
    remote_file = "/home/ubuntu/keystone.keytab"
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmp_file = "{}/{}".format(tmpdirname, dump_file)
        logging.info('Retrieving keystone.keytab from the kerberos server.')
        zaza.model.scp_from_unit(
            kerberos_server.name,
            remote_file,
            tmp_file)
        logging.info('Attaching the keystone_keytab resource to '
                     'keystone-kerberos')
        zaza.model.attach_resource('keystone-kerberos',
                                   'keystone_keytab',
                                   tmp_file)
    # cs:ubuntu charm has changed behaviour and we can't rely on the workload
    # status message. Thus, ignore it.
    states = {
        "ubuntu-test-host": {
            "workload-status": "active",
            "workload-status-message": "",
        }
    }
    zaza.model.wait_for_application_states(states=states)
    zaza.model.block_until_all_units_idle()
def openstack_setup_kerberos():
    """Create a test domain, project, and user for kerberos tests."""
    # Throwaway test credentials; they must match what the kerberos
    # authentication test uses ('admin'/'password123' in domain/project
    # 'k8s').
    kerberos_domain = 'k8s'
    kerberos_project = 'k8s'
    kerberos_user = 'admin'
    kerberos_password = 'password123'
    role = 'admin'
    logging.info('Retrieving a keystone session and client.')
    keystone_session = openstack_utils.get_overcloud_keystone_session()
    keystone_client = openstack_utils.get_keystone_session_client(
        keystone_session)
    logging.info('Creating domain, project and user for Kerberos tests.')
    domain = keystone_client.domains.create(kerberos_domain,
                                            description='Kerberos Domain',
                                            enabled=True)
    project = keystone_client.projects.create(kerberos_project,
                                              domain,
                                              description='Test project',
                                              enabled=True)
    demo_user = keystone_client.users.create(kerberos_user,
                                             domain=domain,
                                             project=project,
                                             password=kerberos_password,
                                             email='demo@demo.com',
                                             description='Demo User',
                                             enabled=True)
    admin_role = keystone_client.roles.find(name=role)
    # Grant the admin role at both project and domain scope so the test user
    # can authenticate at either scope.
    keystone_client.roles.grant(
        admin_role,
        user=demo_user,
        project_domain=domain,
        project=project
    )
    keystone_client.roles.grant(
        admin_role,
        user=demo_user,
        domain=domain
    )
def setup_kerberos_configuration_for_test_host():
    """Retrieve the keytab and krb5.conf to setup the ubuntu test host.

    Copies /etc/krb5.keytab and /etc/krb5.conf from the kerberos-server unit
    to the ubuntu-test-host unit, staging each file through a local temporary
    directory (there is no direct unit-to-unit copy).
    """
    kerberos_server = zaza.model.get_units('kerberos-server')[0]
    ubuntu_test_host = zaza.model.get_units('ubuntu-test-host')[0]

    dump_file = "krb5.keytab"
    remote_file = "/etc/krb5.keytab"
    host_keytab_path = '/home/ubuntu/krb5.keytab'
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmp_file = "{}/{}".format(tmpdirname, dump_file)
        logging.info("Retrieving {} from {}.".format(remote_file,
                                                     kerberos_server.name))
        zaza.model.scp_from_unit(
            kerberos_server.name,
            remote_file,
            tmp_file)
        logging.info("SCP {} to {} on {}.".format(tmp_file,
                                                  host_keytab_path,
                                                  ubuntu_test_host.name))
        zaza.model.scp_to_unit(
            ubuntu_test_host.name,
            tmp_file,
            host_keytab_path)

    dump_file = "krb5.conf"
    remote_file = "/etc/krb5.conf"
    temp_krb5_path = "/home/ubuntu/krb5.conf"
    with tempfile.TemporaryDirectory() as tmpdirname:
        tmp_file = "{}/{}".format(tmpdirname, dump_file)
        logging.info("Retrieving {} from {}".format(remote_file,
                                                    kerberos_server.name))
        zaza.model.scp_from_unit(
            kerberos_server.name,
            remote_file,
            tmp_file)
        # Bug fix: log the unit name rather than the Unit object repr
        # (matches the parallel keytab block above).
        logging.info("SCP {} to {} on {}.".format(tmp_file,
                                                  temp_krb5_path,
                                                  ubuntu_test_host.name))
        zaza.model.scp_to_unit(
            ubuntu_test_host.name,
            tmp_file,
            temp_krb5_path)
    # krb5.conf is staged in the home directory then moved into /etc with
    # sudo — presumably because the scp user cannot write /etc directly
    # (TODO confirm).
    logging.info('Moving {} to {} on {}.'.format(temp_krb5_path,
                 remote_file, ubuntu_test_host.name))
    zaza.model.run_on_unit(ubuntu_test_host.name, ('sudo mv {} {}'.
                           format(temp_krb5_path, remote_file)))
def install_apt_packages_on_ubuntu_test_host():
    """Install apt packages on a zaza unit.

    :raises AssertionError: when any package installation exits non-zero.
    """
    unit_name = zaza.model.get_units('ubuntu-test-host')[0].name
    for package in ('krb5-user', 'python3-openstackclient',
                    'python3-requests-kerberos'):
        logging.info('Installing {}'.format(package))
        result = zaza.model.run_on_unit(
            unit_name, "apt install {} -y".format(package))
        assert result['Code'] == '0', result['Stderr']
def run_all_configuration_steps():
    """Execute all the necessary functions for the tests setup."""
    # Order matters: the keytab must exist before it can be retrieved, and
    # the host configuration depends on the earlier steps.
    steps = (
        add_empty_resource_file_to_keystone_kerberos,
        add_dns_entry,
        configure_keystone_service_in_kerberos,
        retrieve_and_attach_keytab,
        openstack_setup_kerberos,
        setup_kerberos_configuration_for_test_host,
        install_apt_packages_on_ubuntu_test_host,
    )
    for step in steps:
        step()

View File

@@ -0,0 +1,74 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keystone Kerberos Tests."""
import logging
import zaza.model
from zaza.openstack.charm_tests.kerberos.setup import get_unit_full_hostname
from zaza.openstack.charm_tests.keystone import BaseKeystoneTest
from zaza.openstack.utilities import openstack as openstack_utils
class CharmKeystoneKerberosTest(BaseKeystoneTest):
    """Charm Keystone Kerberos Test."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running Keystone Kerberos charm tests."""
        # NOTE(review): this override only delegates to the parent; it could
        # be dropped without effect.
        super(CharmKeystoneKerberosTest, cls).setUpClass()

    def test_keystone_kerberos_authentication(self):
        """Validate auth to OpenStack through the kerberos method."""
        logging.info('Retrieving a kerberos token with kinit for admin user')
        ubuntu_test_host = zaza.model.get_units('ubuntu-test-host')[0]
        # 'admin'/'password123' and domain/project 'k8s' below must match the
        # credentials created by the kerberos setup module.
        result = zaza.model.run_on_unit(ubuntu_test_host.name,
                                        "echo password123 | kinit admin")
        assert result['Code'] == '0', result['Stderr']
        logging.info('Changing token mod for user access')
        # Copy the root (uid 0) ticket cache to the uid-1000 cache path so
        # the openstack CLI run as the default user can read it.
        result = zaza.model.run_on_unit(
            ubuntu_test_host.name,
            "sudo install -m 777 /tmp/krb5cc_0 /tmp/krb5cc_1000"
        )
        assert result['Code'] == '0', result['Stderr']
        logging.info('Fetching user/project info in OpenStack')
        domain_name = 'k8s'
        project_name = 'k8s'
        keystone_session = openstack_utils.get_overcloud_keystone_session()
        keystone_client = openstack_utils.get_keystone_session_client(
            keystone_session)
        domain_id = keystone_client.domains.find(name=domain_name).id
        project_id = keystone_client.projects.find(name=project_name).id
        keystone_hostname = get_unit_full_hostname('keystone')
        logging.info('Retrieving an OpenStack token to validate auth')
        # Token issuance via the /krb/v3 endpoint proves the kerberos auth
        # path end to end.
        cmd = 'openstack token issue -f value -c id ' \
              '--os-auth-url http://{}:5000/krb/v3 ' \
              '--os-project-id {} ' \
              '--os-project-name {} ' \
              '--os-project-domain-id {} ' \
              '--os-region-name RegionOne ' \
              '--os-interface public ' \
              '--os-identity-api-version 3 ' \
              '--os-auth-type v3kerberos'.format(keystone_hostname,
                                                project_id,
                                                project_name,
                                                domain_id)
        result = zaza.model.run_on_unit(ubuntu_test_host.name, cmd)
        assert result['Code'] == '0', result['Stderr']

View File

@@ -13,6 +13,7 @@
# limitations under the License.
"""Collection of code for setting up and testing keystone."""
import contextlib
import zaza
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.utilities.openstack as openstack_utils
@@ -25,14 +26,19 @@ DEMO_ADMIN_USER_PASSWORD = 'password'
DEMO_USER = 'demo'
DEMO_PASSWORD = 'password'
TEMPEST_ROLES = ['member', 'ResellerAdmin']
class BaseKeystoneTest(test_utils.OpenStackBaseTest):
"""Base for Keystone charm tests."""
@classmethod
def setUpClass(cls):
def setUpClass(cls, application_name=None):
"""Run class setup for running Keystone charm operation tests."""
super(BaseKeystoneTest, cls).setUpClass(application_name='keystone')
# Standardize v2 and v3 as ints
cls.api_v2 = 2
cls.api_v3 = 3
# Check if we are related to Vault TLS certificates
cls.tls_rid = zaza.model.get_relation_id(
'keystone', 'vault', remote_interface_name='certificates')
@@ -50,12 +56,21 @@ class BaseKeystoneTest(test_utils.OpenStackBaseTest):
cls.keystone_ips.append(cls.vip)
if (openstack_utils.get_os_release() <
openstack_utils.get_os_release('xenial_queens')):
cls.default_api_version = '2'
cls.default_api_version = cls.api_v2
else:
cls.default_api_version = '3'
cls.default_api_version = cls.api_v3
cls.admin_keystone_session = (
openstack_utils.get_overcloud_keystone_session())
cls.admin_keystone_client = (
openstack_utils.get_keystone_session_client(
cls.admin_keystone_session,
client_api_version=cls.default_api_version))
@contextlib.contextmanager
def v3_keystone_preferred(self):
"""Set the preferred keystone api to v3 within called context."""
with self.config_change(
{'preferred-api-version': self.default_api_version},
{'preferred-api-version': self.api_v3},
application_name="keystone"):
yield

View File

@@ -14,6 +14,12 @@
"""Code for setting up keystone."""
import logging
import keystoneauth1
import zaza.charm_lifecycle.utils as lifecycle_utils
import zaza.model
import zaza.openstack.utilities.openstack as openstack_utils
from zaza.openstack.charm_tests.keystone import (
BaseKeystoneTest,
@@ -24,9 +30,28 @@ from zaza.openstack.charm_tests.keystone import (
DEMO_ADMIN_USER_PASSWORD,
DEMO_USER,
DEMO_PASSWORD,
TEMPEST_ROLES,
)
def wait_for_cacert(model_name=None):
    """Wait for keystone to install a cacert.

    Blocks until a CERTIFICATE appears on the keystone application, then
    waits for the model to settle into its target deploy status.

    :param model_name: Name of model to query.
    :type model_name: str
    """
    logging.info("Waiting for cacert")
    # Use the module alias imported at the top of the file; spelling out the
    # full 'zaza.openstack.utilities.openstack' attribute chain only worked
    # as a side effect of that aliased import.
    openstack_utils.block_until_ca_exists(
        'keystone',
        'CERTIFICATE',
        model_name=model_name)
    zaza.model.block_until_all_units_idle(model_name=model_name)
    test_config = lifecycle_utils.get_charm_config(fatal=False)
    zaza.model.wait_for_application_states(
        states=test_config.get('target_deploy_status', {}),
        model_name=model_name)
def add_demo_user():
"""Add a demo user to the current deployment."""
def _v2():
@@ -110,8 +135,35 @@ def add_demo_user():
# under test other than keystone.
with _singleton.config_change(
{'preferred-api-version': _singleton.default_api_version},
{'preferred-api-version': '3'}, application_name="keystone"):
{'preferred-api-version': 3}, application_name="keystone"):
_v3()
else:
# create only V3 user
_v3()
def _add_additional_roles(roles):
    """Create the given roles in keystone, ignoring any that already exist.

    :param roles: Names of the roles to create.
    :type roles: list
    :returns: None
    :rtype: None
    """
    session = openstack_utils.get_overcloud_keystone_session()
    client = openstack_utils.get_keystone_session_client(session)
    for name in roles:
        try:
            client.roles.create(name)
        except keystoneauth1.exceptions.http.Conflict:
            # Role already exists; nothing to do.
            pass
def add_tempest_roles():
    """Ensure the roles required by tempest exist in this deployment.

    :returns: None
    :rtype: None
    """
    _add_additional_roles(TEMPEST_ROLES)

View File

@@ -17,14 +17,13 @@ import collections
import json
import logging
import pprint
import keystoneauth1
import zaza.model
import zaza.openstack.utilities.exceptions as zaza_exceptions
import zaza.openstack.utilities.juju as juju_utils
import zaza.utilities.juju as juju_utils
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.charm_lifecycle.utils as lifecycle_utils
import zaza.openstack.charm_tests.test_utils as test_utils
from zaza.openstack.charm_tests.keystone import (
BaseKeystoneTest,
@@ -189,10 +188,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
openstack_utils.get_os_release('trusty_mitaka')):
logging.info('skipping test < trusty_mitaka')
return
with self.config_change(
{'preferred-api-version': self.default_api_version},
{'preferred-api-version': '3'},
application_name="keystone"):
with self.v3_keystone_preferred():
for ip in self.keystone_ips:
try:
logging.info('keystone IP {}'.format(ip))
@@ -212,7 +208,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
def test_end_user_domain_admin_access(self):
"""Verify that end-user domain admin does not have elevated privileges.
In additon to validating that the `policy.json` is written and the
In addition to validating that the `policy.json` is written and the
service is restarted on config-changed, the test validates that our
`policy.json` is correct.
@@ -222,10 +218,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
openstack_utils.get_os_release('xenial_ocata')):
logging.info('skipping test < xenial_ocata')
return
with self.config_change(
{'preferred-api-version': self.default_api_version},
{'preferred-api-version': '3'},
application_name="keystone"):
with self.v3_keystone_preferred():
for ip in self.keystone_ips:
openrc = {
'API_VERSION': 3,
@@ -236,7 +229,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
'OS_DOMAIN_NAME': DEMO_DOMAIN,
}
if self.tls_rid:
openrc['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT
openrc['OS_CACERT'] = openstack_utils.get_cacert()
openrc['OS_AUTH_URL'] = (
openrc['OS_AUTH_URL'].replace('http', 'https'))
logging.info('keystone IP {}'.format(ip))
@@ -257,7 +250,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
'allowed when it should not be.')
logging.info('OK')
def test_end_user_acccess_and_token(self):
def test_end_user_access_and_token(self):
"""Verify regular end-user access resources and validate token data.
In effect this also validates user creation, presence of standard
@@ -266,9 +259,10 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
"""
def _validate_token_data(openrc):
if self.tls_rid:
openrc['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT
openrc['OS_CACERT'] = openstack_utils.get_cacert()
openrc['OS_AUTH_URL'] = (
openrc['OS_AUTH_URL'].replace('http', 'https'))
logging.info('keystone IP {}'.format(ip))
keystone_session = openstack_utils.get_keystone_session(
openrc)
keystone_client = openstack_utils.get_keystone_session_client(
@@ -326,15 +320,27 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest):
'OS_PROJECT_DOMAIN_NAME': DEMO_DOMAIN,
'OS_PROJECT_NAME': DEMO_PROJECT,
}
with self.config_change(
{'preferred-api-version': self.default_api_version},
{'preferred-api-version': '3'},
application_name="keystone"):
with self.v3_keystone_preferred():
for ip in self.keystone_ips:
openrc.update(
{'OS_AUTH_URL': 'http://{}:5000/v3'.format(ip)})
_validate_token_data(openrc)
def test_backward_compatible_uuid_for_default_domain(self):
"""Check domain named ``default`` literally has ``default`` as ID.
Some third party software chooses to hard code this value for some
inexplicable reason.
"""
with self.v3_keystone_preferred():
ks_session = openstack_utils.get_keystone_session(
openstack_utils.get_overcloud_auth())
ks_client = openstack_utils.get_keystone_session_client(
ks_session)
domain = ks_client.domains.get('default')
logging.info(pprint.pformat(domain))
assert domain.id == 'default'
class SecurityTests(BaseKeystoneTest):
"""Keystone security tests tests."""
@@ -350,13 +356,13 @@ class SecurityTests(BaseKeystoneTest):
# this initial work to get validation in. There will be bugs targeted
# to each one and resolved independently where possible.
expected_failures = [
'disable-admin-token',
]
expected_passes = [
'check-max-request-body-size',
'uses-sha256-for-hashing-tokens',
'uses-fernet-token-after-default',
'disable-admin-token',
'insecure-debug-is-false',
'uses-fernet-token-after-default',
'uses-sha256-for-hashing-tokens',
'validate-file-ownership',
'validate-file-permissions',
]
@@ -370,4 +376,307 @@ class SecurityTests(BaseKeystoneTest):
action_params={}),
expected_passes,
expected_failures,
expected_to_pass=False)
expected_to_pass=True)
class LdapTests(BaseKeystoneTest):
    """Keystone ldap tests."""

    # Charm config keys whose values are not plain strings (ints/booleans).
    # They are split out so config_change can reset them to charm defaults
    # rather than round-tripping them through string comparison.
    non_string_type_keys = ('ldap-user-enabled-mask',
                            'ldap-user-enabled-invert',
                            'ldap-group-members-are-ids',
                            'ldap-use-pool')

    @classmethod
    def setUpClass(cls):
        """Run class setup for running Keystone ldap-tests."""
        super(LdapTests, cls).setUpClass()

    def _get_ldap_config(self):
        """Generate ldap config for current model.

        Points the keystone-ldap application at the first ldap-server unit
        in the model.

        :return: config for the keystone-ldap application.
        :rtype: Dict[str, str]
        """
        ldap_ips = zaza.model.get_app_ips("ldap-server")
        self.assertTrue(ldap_ips, "Should be at least one ldap server")
        return {
            'ldap-server': "ldap://{}".format(ldap_ips[0]),
            'ldap-user': 'cn=admin,dc=test,dc=com',
            'ldap-password': 'crapper',
            'ldap-suffix': 'dc=test,dc=com',
            'domain-name': 'userdomain',
            'ldap-config-flags':
                {
                    'group_tree_dn': 'ou=groups,dc=test,dc=com',
                    'group_objectclass': 'posixGroup',
                    'group_name_attribute': 'cn',
                    'group_member_attribute': 'memberUid',
                    'group_members_are_ids': 'true',
                }
        }

    def _find_keystone_v3_user(self, username, domain, group=None):
        """Find a user within a specified keystone v3 domain.

        :param str username: Username to search for in keystone
        :param str domain: username selected from which domain
        :param str group: group to search for in keystone for group membership
        :return: the username if found, otherwise None
        :rtype: Optional[str]
        """
        # Try each keystone unit in turn; a match on any unit is accepted.
        for ip in self.keystone_ips:
            logging.info('Keystone IP {}'.format(ip))
            session = openstack_utils.get_keystone_session(
                openstack_utils.get_overcloud_auth(address=ip))
            client = openstack_utils.get_keystone_session_client(session)
            if group is None:
                domain_users = client.users.list(
                    domain=client.domains.find(name=domain).id,
                )
            else:
                # NOTE(review): if the group is absent,
                # _find_keystone_v3_group returns None and `.id` raises
                # AttributeError rather than reporting a missing user.
                domain_users = client.users.list(
                    domain=client.domains.find(name=domain).id,
                    group=self._find_keystone_v3_group(group, domain).id,
                )
            # Case-insensitive comparison: LDAP attribute case may differ.
            usernames = [u.name.lower() for u in domain_users]
            if username.lower() in usernames:
                return username
        logging.debug(
            "User {} was not found. Returning None.".format(username)
        )
        return None

    def _find_keystone_v3_group(self, group, domain):
        """Find a group within a specified keystone v3 domain.

        :param str group: Group to search for in keystone
        :param str domain: group selected from which domain
        :return: the keystone group object if found, otherwise None
        :rtype: Optional[keystoneclient v3 group]
        """
        for ip in self.keystone_ips:
            logging.info('Keystone IP {}'.format(ip))
            session = openstack_utils.get_keystone_session(
                openstack_utils.get_overcloud_auth(address=ip))
            client = openstack_utils.get_keystone_session_client(session)
            domain_groups = client.groups.list(
                domain=client.domains.find(name=domain).id
            )
            # Case-insensitive name match against every group in the domain.
            for searched_group in domain_groups:
                if searched_group.name.lower() == group.lower():
                    return searched_group
        logging.debug(
            "Group {} was not found. Returning None.".format(group)
        )
        return None

    def test_100_keystone_ldap_users(self):
        """Validate basic functionality of keystone API with ldap."""
        application_name = 'keystone-ldap'
        intended_cfg = self._get_ldap_config()
        current_cfg, non_string_cfg = (
            self.config_current_separate_non_string_type_keys(
                self.non_string_type_keys, intended_cfg, application_name)
        )
        # Outer context resets non-string options to charm defaults; inner
        # context applies the string LDAP config for the duration of the test.
        with self.config_change(
                {},
                non_string_cfg,
                application_name=application_name,
                reset_to_charm_default=True):
            with self.config_change(
                    current_cfg,
                    intended_cfg,
                    application_name=application_name):
                logging.info(
                    'Waiting for users to become available in keystone...'
                )
                test_config = lifecycle_utils.get_charm_config(fatal=False)
                zaza.model.wait_for_application_states(
                    states=test_config.get("target_deploy_status", {})
                )
                with self.v3_keystone_preferred():
                    # NOTE(jamespage): Test fixture should have
                    # johndoe and janedoe accounts
                    johndoe = self._find_keystone_v3_user(
                        'john doe', 'userdomain')
                    self.assertIsNotNone(
                        johndoe, "user 'john doe' was unknown")
                    janedoe = self._find_keystone_v3_user(
                        'jane doe', 'userdomain')
                    self.assertIsNotNone(
                        janedoe, "user 'jane doe' was unknown")

    def test_101_keystone_ldap_groups(self):
        """Validate basic functionality of keystone API with ldap."""
        application_name = 'keystone-ldap'
        intended_cfg = self._get_ldap_config()
        current_cfg, non_string_cfg = (
            self.config_current_separate_non_string_type_keys(
                self.non_string_type_keys, intended_cfg, application_name)
        )
        with self.config_change(
                {},
                non_string_cfg,
                application_name=application_name,
                reset_to_charm_default=True):
            with self.config_change(
                    current_cfg,
                    intended_cfg,
                    application_name=application_name):
                logging.info(
                    'Waiting for groups to become available in keystone...'
                )
                test_config = lifecycle_utils.get_charm_config(fatal=False)
                zaza.model.wait_for_application_states(
                    states=test_config.get("target_deploy_status", {})
                )
                with self.v3_keystone_preferred():
                    # NOTE(arif-ali): Test fixture should have openstack and
                    # admin groups
                    # NOTE(review): if a group is missing,
                    # _find_keystone_v3_group returns None and `.name` raises
                    # AttributeError before the assertion fires.
                    openstack_group = self._find_keystone_v3_group(
                        'openstack', 'userdomain')
                    self.assertIsNotNone(
                        openstack_group.name, "group 'openstack' was unknown")
                    admin_group = self._find_keystone_v3_group(
                        'admin', 'userdomain')
                    self.assertIsNotNone(
                        admin_group.name, "group 'admin' was unknown")

    def test_102_keystone_ldap_group_membership(self):
        """Validate basic functionality of keystone API with ldap."""
        application_name = 'keystone-ldap'
        intended_cfg = self._get_ldap_config()
        current_cfg, non_string_cfg = (
            self.config_current_separate_non_string_type_keys(
                self.non_string_type_keys, intended_cfg, application_name)
        )
        with self.config_change(
                {},
                non_string_cfg,
                application_name=application_name,
                reset_to_charm_default=True):
            with self.config_change(
                    current_cfg,
                    intended_cfg,
                    application_name=application_name):
                logging.info(
                    'Waiting for groups to become available in keystone...'
                )
                test_config = lifecycle_utils.get_charm_config(fatal=False)
                zaza.model.wait_for_application_states(
                    states=test_config.get("target_deploy_status", {})
                )
                with self.v3_keystone_preferred():
                    # NOTE(arif-ali): Test fixture should have openstack and
                    # admin groups
                    openstack_group = self._find_keystone_v3_user(
                        'john doe', 'userdomain', group='openstack')
                    self.assertIsNotNone(
                        openstack_group,
                        "john doe was not in group 'openstack'")
                    admin_group = self._find_keystone_v3_user(
                        'john doe', 'userdomain', group='admin')
                    self.assertIsNotNone(
                        admin_group, "'john doe' was not in group 'admin'")
class LdapExplicitCharmConfigTests(LdapTests):
    """Keystone ldap tests."""

    def _get_ldap_config(self):
        """Generate ldap config for current model.

        Unlike the parent class, every LDAP option is set explicitly as a
        charm config key, and ldap-config-flags deliberately overlaps some
        of them so the precedence test can check which value wins.

        :return: config for the keystone-ldap application.
        :rtype: Dict[str, str]
        """
        ldap_ips = zaza.model.get_app_ips("ldap-server")
        self.assertTrue(ldap_ips, "Should be at least one ldap server")
        return {
            'ldap-server': "ldap://{}".format(ldap_ips[0]),
            'ldap-user': 'cn=admin,dc=test,dc=com',
            'ldap-password': 'crapper',
            'ldap-suffix': 'dc=test,dc=com',
            'domain-name': 'userdomain',
            'ldap-query-scope': 'one',
            'ldap-user-objectclass': 'inetOrgPerson',
            'ldap-user-id-attribute': 'cn',
            'ldap-user-name-attribute': 'sn',
            'ldap-user-enabled-attribute': 'enabled',
            'ldap-user-enabled-invert': False,
            'ldap-user-enabled-mask': 0,
            'ldap-user-enabled-default': 'True',
            'ldap-group-tree-dn': 'ou=groups,dc=test,dc=com',
            'ldap-group-objectclass': '',
            'ldap-group-id-attribute': 'cn',
            'ldap-group-name-attribute': 'cn',
            'ldap-group-member-attribute': 'memberUid',
            'ldap-group-members-are-ids': True,
            # Overlaps with the explicit keys above on purpose:
            # group_tree_dn here must LOSE to ldap-group-tree-dn, while
            # use_pool and group_objectclass (empty charm value) must WIN.
            'ldap-config-flags': '{group_objectclass: "posixGroup",'
                                 ' use_pool: True,'
                                 ' group_tree_dn: "group_tree_dn_foobar"}',
        }

    def test_200_config_flags_precedence(self):
        """Validates precedence when the same config options are used."""
        application_name = 'keystone-ldap'
        intended_cfg = self._get_ldap_config()
        current_cfg, non_string_cfg = (
            self.config_current_separate_non_string_type_keys(
                self.non_string_type_keys, intended_cfg, application_name)
        )
        with self.config_change(
                {},
                non_string_cfg,
                application_name=application_name,
                reset_to_charm_default=True):
            with self.config_change(
                    current_cfg,
                    intended_cfg,
                    application_name=application_name):
                logging.info(
                    'Performing LDAP settings validation in keystone.conf...'
                )
                test_config = lifecycle_utils.get_charm_config(fatal=False)
                zaza.model.wait_for_application_states(
                    states=test_config.get("target_deploy_status", {})
                )
                # Read the rendered domain config straight off the unit.
                units = zaza.model.get_units("keystone-ldap",
                                             model_name=self.model_name)
                result = zaza.model.run_on_unit(
                    units[0].name,
                    "cat /etc/keystone/domains/keystone.userdomain.conf")
                # NOTE(review): the failure messages in the last two asserts
                # mention user_tree_dn although the checks concern
                # group_tree_dn — messages only, behavior is correct.
                # not present in charm config, but present in config flags
                self.assertIn("use_pool = True", result['stdout'],
                              "use_pool value is expected to be present and "
                              "set to True in the config file")
                # ldap-config-flags overriding empty charm config value
                self.assertIn("group_objectclass = posixGroup",
                              result['stdout'],
                              "group_objectclass is expected to be present and"
                              " set to posixGroup in the config file")
                # overridden by charm config, not written to file
                self.assertNotIn(
                    "group_tree_dn_foobar",
                    result['stdout'],
                    "user_tree_dn ldap-config-flags value needs to be "
                    "overridden by ldap-user-tree-dn in config file")
                # complementing the above, value used is from charm setting
                self.assertIn("group_tree_dn = ou=groups", result['stdout'],
                              "user_tree_dn value is expected to be present "
                              "and set to dc=test,dc=com in the config file")

View File

@@ -0,0 +1,15 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of code for setting up and testing Magpie."""

View File

@@ -0,0 +1,82 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Magpie testing."""
import logging
import zaza
import zaza.model
import zaza.openstack.charm_tests.test_utils as test_utils
class MagpieTest(test_utils.BaseCharmTest):
    """Base Magpie tests."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for Magpie charm operation tests."""
        super(MagpieTest, cls).setUpClass()
        # Sort unit names so unit 0/1 assignment is deterministic.
        unit_names = sorted(
            [i.entity_id
             for i in zaza.model.get_units('magpie')])
        cls.test_unit_0 = unit_names[0]
        cls.test_unit_1 = unit_names[1]

    def test_break_dns_single(self):
        """Check DNS failure is reflected in workload status."""
        # Hide resolv.conf so name resolution fails on the unit.
        zaza.model.run_on_unit(
            self.test_unit_0,
            'mv /etc/resolv.conf /etc/resolv.conf.bak')
        # Run the hook directly rather than waiting for the next
        # update-status interval.
        zaza.model.run_on_unit(
            self.test_unit_0,
            './hooks/update-status')
        zaza.model.block_until_unit_wl_message_match(
            self.test_unit_0,
            '.*rev dns failed.*')
        # Undo the breakage so later tests see a healthy unit.
        logging.info('Restoring /etc/resolv.conf')
        zaza.model.run_on_unit(
            self.test_unit_0,
            'mv /etc/resolv.conf.bak /etc/resolv.conf')
        logging.info('Updating status')
        zaza.model.run_on_unit(
            self.test_unit_0,
            './hooks/update-status')

    def test_break_ping_single(self):
        """Check ping failure is reflected in workload status."""
        # iptables template; '--append' installs the REJECT rule,
        # '--delete' removes the identical rule afterwards.
        icmp = "iptables {} INPUT -p icmp --icmp-type echo-request -j REJECT"
        logging.info('Blocking ping on {}'.format(self.test_unit_1))
        zaza.model.run_on_unit(
            self.test_unit_1,
            icmp.format('--append'))
        zaza.model.run_on_unit(
            self.test_unit_0,
            './hooks/update-status')
        logging.info('Checking status on {}'.format(self.test_unit_0))
        zaza.model.block_until_unit_wl_message_match(
            self.test_unit_0,
            '.*icmp failed.*')
        logging.info('Allowing ping on {}'.format(self.test_unit_1))
        zaza.model.run_on_unit(
            self.test_unit_1,
            icmp.format('--delete'))
        zaza.model.run_on_unit(
            self.test_unit_0,
            './hooks/update-status')
        logging.info('Checking status on {}'.format(self.test_unit_0))
        zaza.model.block_until_unit_wl_message_match(
            self.test_unit_0,
            '.*icmp ok.*')

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Manila setup and testing."""

View File

@@ -0,0 +1,376 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Manila testing."""
import logging
import tenacity
from manilaclient import client as manilaclient
import zaza.model
import zaza.openstack.configure.guest as guest
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.test_utils as test_utils
import zaza.openstack.charm_tests.nova.utils as nova_utils
import zaza.openstack.charm_tests.neutron.tests as neutron_tests
def verify_status(stdin, stdout, stderr):
    """Callable to verify the command output.

    It checks if the command successfully executed (remote exit status 0),
    logging the first line of stderr when it did not.

    This is meant to be given as parameter 'verify' to the helper function
    'openstack_utils.ssh_command'.

    :raises: AssertionError if the remote command exited non-zero.
    """
    status = stdout.channel.recv_exit_status()
    if status:
        # Guard against an empty stderr stream: indexing [0] unconditionally
        # would raise IndexError and mask the real (exit status) failure.
        error_lines = stderr.readlines()
        if error_lines:
            logging.info("{}".format(error_lines[0].strip()))
    assert status == 0
def verify_manila_testing_file(stdin, stdout, stderr):
    """Callable to verify the command output.

    It first checks that the command executed successfully, then confirms
    the testing file written on the Manila share holds the expected content.

    This is meant to be given as parameter 'verify' to the helper function
    'openstack_utils.ssh_command'.
    """
    verify_status(stdin, stdout, stderr)
    # Drain stdout until EOF (readline returns "") and compare in one go.
    contents = "".join(iter(stdout.readline, ""))
    assert contents == "test\n"
class ManilaTests(test_utils.OpenStackBaseTest):
    """Encapsulate Manila tests."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super(ManilaTests, cls).setUpClass()
        cls.manila_client = manilaclient.Client(
            session=cls.keystone_session, client_version='2')

    def test_manila_api(self):
        """Test that the Manila API is working."""
        # The manila charm contains a 'band-aid' for Bug #1706699 which relies
        # on update-status to bring up services if needed. When the tests run
        # an update-status hook might not have run so services may still be
        # stopped so force a hook execution.
        for unit in zaza.model.get_units('manila'):
            zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        # A fresh deployment is expected to have no shares.
        self.assertEqual([], self._list_shares())

    @tenacity.retry(
        stop=tenacity.stop_after_attempt(5),
        wait=tenacity.wait_exponential(multiplier=3, min=2, max=10))
    def _list_shares(self):
        # Retried with backoff: the API may need a moment to come up after
        # the forced update-status runs above.
        return self.manila_client.shares.list()
class ManilaBaseTest(test_utils.OpenStackBaseTest):
    """Encapsulate a Manila basic functionality test."""

    RESOURCE_PREFIX = 'zaza-manilatests'
    INSTANCE_KEY = 'bionic'
    # cloud-init userdata: the guests need nfs-common to mount NFS shares.
    INSTANCE_USERDATA = """#cloud-config
packages:
- nfs-common
"""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super(ManilaBaseTest, cls).setUpClass()
        cls.nova_client = openstack_utils.get_nova_session_client(
            session=cls.keystone_session)
        cls.manila_client = manilaclient.Client(
            session=cls.keystone_session, client_version='2')
        # Defaults for the generic backend; subclasses (e.g. Ganesha)
        # override these as needed.
        cls.share_name = 'test-manila-share'
        cls.share_type_name = 'default_share_type'
        cls.share_protocol = 'nfs'
        cls.mount_dir = '/mnt/manila_share'
        cls.share_network = None

    @classmethod
    def tearDownClass(cls):
        """Run class teardown after tests finished."""
        # Cleanup Nova servers
        logging.info('Cleaning up test Nova servers')
        fips_reservations = []
        for vm in cls.nova_client.servers.list():
            # Remember each server's FiPs before deleting, so the
            # reservations can be released below.
            fips_reservations += neutron_tests.floating_ips_from_instance(vm)
            vm.delete()
            openstack_utils.resource_removed(
                cls.nova_client.servers,
                vm.id,
                msg="Waiting for the Nova VM {} to be deleted".format(vm.name))

        # Delete FiPs reservations
        logging.info('Cleaning up test FiPs reservations')
        neutron = openstack_utils.get_neutron_session_client(
            session=cls.keystone_session)
        for fip in neutron.list_floatingips()['floatingips']:
            if fip['floating_ip_address'] in fips_reservations:
                neutron.delete_floatingip(fip['id'])

        # Cleanup Manila shares
        logging.info('Cleaning up test shares')
        for share in cls.manila_client.shares.list():
            share.delete()
            openstack_utils.resource_removed(
                cls.manila_client.shares,
                share.id,
                msg="Waiting for the Manila share {} to be deleted".format(
                    share.name))

        # Cleanup test Manila share servers (spawned by the driver when DHSS
        # is enabled).
        logging.info('Cleaning up test shares servers (if found)')
        for server in cls.manila_client.share_servers.list():
            server.delete()
            openstack_utils.resource_removed(
                cls.manila_client.share_servers,
                server.id,
                msg="Waiting for the share server {} to be deleted".format(
                    server.id))

    def _get_mount_options(self):
        """Get the appropriate mount options used to mount the Manila share.

        :returns: The proper mount options flags for the share protocol.
        :rtype: string
        :raises: NotImplementedError for unsupported share protocols.
        """
        if self.share_protocol == 'nfs':
            return 'nfsvers=4.1,proto=tcp'
        else:
            raise NotImplementedError(
                'Share protocol not supported yet: {}'.format(
                    self.share_protocol))

    def _mount_share_on_instance(self, instance_ip, ssh_user_name,
                                 ssh_private_key, share_path):
        """Mount a share into a Nova instance.

        The mount command is executed via SSH.

        :param instance_ip: IP of the Nova instance.
        :type instance_ip: string
        :param ssh_user_name: SSH user name.
        :type ssh_user_name: string
        :param ssh_private_key: SSH private key.
        :type ssh_private_key: string
        :param share_path: Share network path.
        :type share_path: string
        """
        ssh_cmd = (
            'sudo mkdir -p {0} && '
            'sudo mount -t {1} -o {2} {3} {0}'.format(
                self.mount_dir,
                self.share_protocol,
                self._get_mount_options(),
                share_path))

        # Retry with backoff: the guest may not be ready on first attempt
        # (e.g. cloud-init still installing nfs-common from
        # INSTANCE_USERDATA).
        for attempt in tenacity.Retrying(
                stop=tenacity.stop_after_attempt(5),
                wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)):
            with attempt:
                openstack_utils.ssh_command(
                    vm_name="instance-{}".format(instance_ip),
                    ip=instance_ip,
                    username=ssh_user_name,
                    privkey=ssh_private_key,
                    command=ssh_cmd,
                    verify=verify_status)

    @tenacity.retry(
        stop=tenacity.stop_after_attempt(5),
        wait=tenacity.wait_exponential(multiplier=3, min=2, max=10))
    def _write_testing_file_on_instance(self, instance_ip, ssh_user_name,
                                        ssh_private_key):
        """Write a file on a Manila share mounted into a Nova instance.

        Write a testing file into the already mounted Manila share from the
        given Nova instance (which is meant to be validated from another
        instance). These commands are executed via SSH.

        :param instance_ip: IP of the Nova instance.
        :type instance_ip: string
        :param ssh_user_name: SSH user name.
        :type ssh_user_name: string
        :param ssh_private_key: SSH private key.
        :type ssh_private_key: string
        """
        openstack_utils.ssh_command(
            vm_name="instance-{}".format(instance_ip),
            ip=instance_ip,
            username=ssh_user_name,
            privkey=ssh_private_key,
            command='echo "test" | sudo tee {}/test'.format(
                self.mount_dir),
            verify=verify_status)

    @tenacity.retry(
        stop=tenacity.stop_after_attempt(5),
        wait=tenacity.wait_exponential(multiplier=3, min=2, max=10))
    def _clear_testing_file_on_instance(self, instance_ip, ssh_user_name,
                                        ssh_private_key):
        """Clear a file on a Manila share mounted into a Nova instance.

        Remove a testing file from the already mounted Manila share on the
        given Nova instance. These commands are executed via SSH.

        :param instance_ip: IP of the Nova instance.
        :type instance_ip: string
        :param ssh_user_name: SSH user name.
        :type ssh_user_name: string
        :param ssh_private_key: SSH private key.
        :type ssh_private_key: string
        """
        openstack_utils.ssh_command(
            vm_name="instance-{}".format(instance_ip),
            ip=instance_ip,
            username=ssh_user_name,
            privkey=ssh_private_key,
            command='sudo rm {}/test'.format(
                self.mount_dir),
            verify=verify_status)

    @tenacity.retry(
        stop=tenacity.stop_after_attempt(5),
        wait=tenacity.wait_exponential(multiplier=3, min=2, max=10))
    def _validate_testing_file_from_instance(self, instance_ip, ssh_user_name,
                                             ssh_private_key):
        """Validate a file from the Manila share mounted into a Nova instance.

        This is meant to run after the testing file was already written into
        another Nova instance. It validates the written file. The commands are
        executed via SSH.

        :param instance_ip: IP of the Nova instance.
        :type instance_ip: string
        :param ssh_user_name: SSH user name.
        :type ssh_user_name: string
        :param ssh_private_key: SSH private key.
        :type ssh_private_key: string
        """
        openstack_utils.ssh_command(
            vm_name="instance-{}".format(instance_ip),
            ip=instance_ip,
            username=ssh_user_name,
            privkey=ssh_private_key,
            command='sudo cat {}/test'.format(self.mount_dir),
            verify=verify_manila_testing_file)

    def _restart_share_instance(self):
        """Restart the share service's provider.

        Intended to be overridden with driver-specific implementations that
        allow verification that the share is still accessible after the
        service is restarted.

        :returns: Whether the test should re-validate share access.
        :rtype: bool
        """
        # Base class has nothing to restart.
        return False

    def test_manila_share(self):
        """Test that a Manila share can be accessed on two instances.

        1. Spawn two servers
        2. Create a share
        3. Mount it on both
        4. Write a file on one
        5. Read it on the other
        6. Profit
        """
        # Spawn Servers
        instance_1 = self.launch_guest(
            guest_name='ins-1',
            userdata=self.INSTANCE_USERDATA,
            instance_key=self.INSTANCE_KEY)
        instance_2 = self.launch_guest(
            guest_name='ins-2',
            userdata=self.INSTANCE_USERDATA,
            instance_key=self.INSTANCE_KEY)

        fip_1 = neutron_tests.floating_ips_from_instance(instance_1)[0]
        fip_2 = neutron_tests.floating_ips_from_instance(instance_2)[0]

        # Create a share
        share = self.manila_client.shares.create(
            share_type=self.share_type_name,
            name=self.share_name,
            share_proto=self.share_protocol,
            share_network=self.share_network,
            size=1)

        # Wait for the created share to become available before it gets used.
        openstack_utils.resource_reaches_status(
            self.manila_client.shares,
            share.id,
            wait_iteration_max_time=120,
            stop_after_attempt=2,
            expected_status="available",
            msg="Waiting for a share to become available")

        # Grant access to the Manila share for both Nova instances.
        share.allow(access_type='ip', access=fip_1, access_level='rw')
        share.allow(access_type='ip', access=fip_2, access_level='rw')

        ssh_user_name = guest.boot_tests[self.INSTANCE_KEY]['username']
        privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME)
        share_path = share.export_locations[0]

        # Write a testing file on instance #1
        self._mount_share_on_instance(
            fip_1, ssh_user_name, privkey, share_path)
        self._write_testing_file_on_instance(
            fip_1, ssh_user_name, privkey)

        # Validate the testing file from instance #2
        self._mount_share_on_instance(
            fip_2, ssh_user_name, privkey, share_path)
        self._validate_testing_file_from_instance(
            fip_2, ssh_user_name, privkey)

        # Restart the share provider
        if self._restart_share_instance():
            logging.info("Verifying manila after restarting share instance")
            # Read the previous testing file from instance #1
            self._mount_share_on_instance(
                fip_1, ssh_user_name, privkey, share_path)
            self._validate_testing_file_from_instance(
                fip_1, ssh_user_name, privkey)
            # Re-mount the share on instance #2
            self._mount_share_on_instance(
                fip_2, ssh_user_name, privkey, share_path)
            # Reset the test!
            self._clear_testing_file_on_instance(
                fip_1, ssh_user_name, privkey
            )
            # Write a testing file on instance #1
            self._write_testing_file_on_instance(
                fip_1, ssh_user_name, privkey)
            # Validate the testing file from instance #2
            self._validate_testing_file_from_instance(
                fip_2, ssh_user_name, privkey)

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Manila Ganesha setup and testing."""

View File

@@ -0,0 +1,46 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Manila Ganesha setup."""
import zaza.openstack.utilities.openstack as openstack_utils
from manilaclient import client as manilaclient
MANILA_GANESHA_TYPE_NAME = "cephfsnfstype"
def setup_ganesha_share_type(manila_client=None):
    """Create the Manila share type used by the Ganesha backend.

    :param manila_client: Authenticated manilaclient; when omitted one is
        built from the overcloud keystone session.
    :type manila_client: manilaclient.Client
    """
    client = manila_client
    if client is None:
        session = openstack_utils.get_overcloud_keystone_session()
        client = manilaclient.Client(session=session, client_version='2')
    extra_specs = {
        'vendor_name': 'Ceph',
        'storage_protocol': 'NFS',
        'snapshot_support': False,
    }
    client.share_types.create(
        name=MANILA_GANESHA_TYPE_NAME,
        spec_driver_handles_share_servers=False,
        extra_specs=extra_specs)

View File

@@ -0,0 +1,54 @@
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Manila Ganesha testing."""
import logging
from zaza.openstack.charm_tests.manila_ganesha.setup import (
MANILA_GANESHA_TYPE_NAME,
)
import zaza.openstack.charm_tests.manila.tests as manila_tests
import zaza.model
class ManilaGaneshaTests(manila_tests.ManilaBaseTest):
    """Encapsulate Manila Ganesha tests."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super(ManilaGaneshaTests, cls).setUpClass()
        # Share parameters consumed by the ManilaBaseTest test methods.
        cls.share_name = 'cephnfsshare1'
        cls.share_type_name = MANILA_GANESHA_TYPE_NAME
        cls.share_protocol = 'nfs'

    def _restart_share_instance(self):
        """Stop manila-share/nfs-ganesha on every unit so Pacemaker restarts them.

        :returns: True, signalling that a restart was issued.
        :rtype: bool
        """
        logging.info('Restarting manila-share and nfs-ganesha')
        # It would be better for this to derive the application name,
        # manila-ganesha-az1, from deployed instances of the manila-ganesha
        # charm; however, that functionality isn't present yet in zaza, so
        # this is hard coded to the application name used in that charm's
        # test bundles.
        for unit in zaza.model.get_units('manila-ganesha-az1'):
            # While we really only need to run this on the machine hosting
            # nfs-ganesha and manila-share, running it everywhere isn't
            # harmful. Pacemaker handles restarting the services.
            zaza.model.run_on_unit(
                unit.entity_id,
                "systemctl stop manila-share nfs-ganesha")
        return True

View File

@@ -0,0 +1,17 @@
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Manila NetApp setup and testing."""

View File

@@ -0,0 +1,96 @@
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Manila NetApp setup."""
import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.charm_tests.neutron.setup as neutron_setup
MANILA_NETAPP_TYPE_NAME = "netapp-ontap"
MANILA_NETAPP_BACKEND_NAME = "netapp-ontap"
MANILA_NETAPP_DHSS_TYPE_NAME = "netapp-ontap-dhss"
MANILA_NETAPP_DHSS_BACKEND_NAME = "netapp-ontap-dhss"
MANILA_NETAPP_SHARE_NET_NAME = "netapp-ontap-share-network"


def create_netapp_share_type(manila_client=None):
    """Create a non-DHSS share type for Manila's NetApp Data ONTAP driver.

    :param manila_client: Authenticated manilaclient
    :type manila_client: manilaclient.Client
    """
    client = manila_client
    if client is None:
        # No client supplied: build one from the overcloud keystone session.
        client = openstack_utils.get_manila_session_client(
            openstack_utils.get_overcloud_keystone_session())
    specs = {
        'vendor_name': 'NetApp',
        'share_backend_name': MANILA_NETAPP_BACKEND_NAME,
        'storage_protocol': 'NFS_CIFS',
    }
    client.share_types.create(
        name=MANILA_NETAPP_TYPE_NAME,
        spec_driver_handles_share_servers=False,
        extra_specs=specs)
def create_netapp_dhss_share_type(manila_client=None):
    """Create a DHSS share type for Manila with NetApp Data ONTAP driver.

    DHSS: driver-handles-share-servers is enabled on this type.

    :param manila_client: Authenticated manilaclient
    :type manila_client: manilaclient.Client
    """
    client = manila_client
    if client is None:
        # No client supplied: build one from the overcloud keystone session.
        session = openstack_utils.get_overcloud_keystone_session()
        client = openstack_utils.get_manila_session_client(session)
    specs = {
        'vendor_name': 'NetApp',
        'share_backend_name': MANILA_NETAPP_DHSS_BACKEND_NAME,
        'storage_protocol': 'NFS_CIFS',
    }
    client.share_types.create(
        name=MANILA_NETAPP_DHSS_TYPE_NAME,
        spec_driver_handles_share_servers=True,
        extra_specs=specs)
def create_netapp_share_network(manila_client=None):
    """Create a Manila share network from the existing provider network.

    This setup function assumes that 'neutron.setup.basic_overcloud_network'
    is called to have the proper tenant networks setup.

    The share network will be bound to the provider network configured by
    'neutron.setup.basic_overcloud_network'.

    :param manila_client: Authenticated manilaclient; when not given, a new
        session client is built from the overcloud keystone session.
    :type manila_client: manilaclient.Client
    """
    session = openstack_utils.get_overcloud_keystone_session()
    if manila_client is None:
        manila_client = openstack_utils.get_manila_session_client(session)
    neutron = openstack_utils.get_neutron_session_client(session)
    # Look up the external provider network/subnet created by the neutron
    # setup; the share network is attached to them below.
    external_net = neutron.find_resource(
        'network',
        neutron_setup.OVERCLOUD_NETWORK_CONFIG['external_net_name'])
    external_subnet = neutron.find_resource(
        'subnet',
        neutron_setup.OVERCLOUD_NETWORK_CONFIG['external_subnet_name'])
    manila_client.share_networks.create(
        name=MANILA_NETAPP_SHARE_NET_NAME,
        neutron_net_id=external_net['id'],
        neutron_subnet_id=external_subnet['id'])

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env python3
# Copyright 2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encapsulate Manila NetApp testing."""
from zaza.openstack.charm_tests.manila_netapp.setup import (
MANILA_NETAPP_TYPE_NAME,
MANILA_NETAPP_DHSS_TYPE_NAME,
MANILA_NETAPP_SHARE_NET_NAME,
)
import zaza.openstack.charm_tests.manila.tests as manila_tests
class ManilaNetAppNFSTest(manila_tests.ManilaBaseTest):
    """Exercise an NFS share backed by the NetApp Data ONTAP driver."""

    @classmethod
    def setUpClass(cls):
        """Configure the share parameters used by the base test methods."""
        super().setUpClass()
        cls.share_protocol = 'nfs'
        cls.share_type_name = MANILA_NETAPP_TYPE_NAME
        cls.share_name = 'netapp-ontap-share'
class ManilaNetAppDHSSNFSTest(manila_tests.ManilaBaseTest):
    """Encapsulate Manila NetApp DHSS (driver-handles-share-servers) NFS test."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super(ManilaNetAppDHSSNFSTest, cls).setUpClass()
        cls.share_name = 'netapp-ontap-dhss-share'
        cls.share_type_name = MANILA_NETAPP_DHSS_TYPE_NAME
        cls.share_protocol = 'nfs'
        # DHSS shares must be created on a share network; look up the one
        # created by manila_netapp.setup.create_netapp_share_network.
        cls.share_network = cls.manila_client.share_networks.find(
            name=MANILA_NETAPP_SHARE_NET_NAME)

View File

@@ -18,6 +18,7 @@
from datetime import datetime
import logging
import unittest
import tenacity
import novaclient
@@ -37,7 +38,8 @@ class MasakariTest(test_utils.OpenStackBaseTest):
@classmethod
def setUpClass(cls):
"""Run class setup for running tests."""
super(MasakariTest, cls).setUpClass()
super(MasakariTest, cls).setUpClass(application_name="masakari")
cls.current_release = openstack_utils.get_os_release()
cls.keystone_session = openstack_utils.get_overcloud_keystone_session()
cls.model_name = zaza.model.get_juju_model()
cls.nova_client = openstack_utils.get_nova_session_client(
@@ -132,8 +134,32 @@ class MasakariTest(test_utils.OpenStackBaseTest):
vm_uuid,
model_name=self.model_name)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=2, max=60),
                reraise=True, stop=tenacity.stop_after_attempt(5),
                retry=tenacity.retry_if_exception_type(AssertionError))
def wait_for_guest_ready(self, vm_name):
    """Wait for the guest to be ready.

    Retries on AssertionError (up to 5 attempts with exponential backoff)
    until every checked nova attribute reports the ready value.

    :param vm_name: Name of guest to check.
    :type vm_name: str
    :raises AssertionError: if the guest is still not ready after the last
        attempt (reraise=True propagates the original failure).
    """
    # Attribute/value pairs a healthy, running guest must report.
    guest_ready_attr_checks = [
        ('OS-EXT-STS:task_state', None),
        ('status', 'ACTIVE'),
        ('OS-EXT-STS:power_state', 1),
        ('OS-EXT-STS:vm_state', 'active')]
    guest = self.nova_client.servers.find(name=vm_name)
    logging.info('Checking guest {} attributes'.format(vm_name))
    for (attr, required_state) in guest_ready_attr_checks:
        logging.info('Checking {} is {}'.format(attr, required_state))
        assert getattr(guest, attr) == required_state
def test_instance_failover(self):
"""Test masakari managed guest migration."""
# Workaround for Bug #1874719
zaza.openstack.configure.hacluster.remove_node(
'masakari',
'node1')
# Launch guest
self.assertTrue(
zaza.openstack.configure.hacluster.check_all_nodes_online(
@@ -162,11 +188,18 @@ class MasakariTest(test_utils.OpenStackBaseTest):
model_name=self.model_name)
openstack_utils.enable_all_nova_services(self.nova_client)
zaza.openstack.configure.masakari.enable_hosts()
self.wait_for_guest_ready(vm_name)
def test_instance_restart_on_fail(self):
"""Test singlee guest crash and recovery."""
"""Test single guest crash and recovery."""
if self.current_release < openstack_utils.get_os_release(
'bionic_ussuri'):
raise unittest.SkipTest(
"Not supported on {}. Bug #1866638".format(
self.current_release))
vm_name = 'zaza-test-instance-failover'
vm = self.ensure_guest(vm_name)
self.wait_for_guest_ready(vm_name)
_, unit_name = self.get_guests_compute_info(vm_name)
logging.info('{} is running on {}'.format(vm_name, unit_name))
guest_pid = self.get_guest_qemu_pid(
@@ -192,6 +225,6 @@ class MasakariTest(test_utils.OpenStackBaseTest):
unit_name,
vm.id,
model_name=self.model_name)
logging.info('{} pid is now {}'.format(vm_name, guest_pid))
logging.info('{} pid is now {}'.format(vm_name, new_guest_pid))
assert new_guest_pid and new_guest_pid != guest_pid, (
"Restart failed or never happened")

View File

@@ -14,10 +14,11 @@
"""MySQL/Percona Cluster Testing."""
import json
import logging
import os
import re
import tempfile
import tenacity
import zaza.charm_lifecycle.utils as lifecycle_utils
@@ -28,31 +29,20 @@ import zaza.openstack.utilities.openstack as openstack_utils
import zaza.openstack.utilities.generic as generic_utils
class MySQLTest(test_utils.OpenStackBaseTest):
PXC_SEEDED_FILE = "/var/lib/percona-xtradb-cluster/seeded"
class MySQLBaseTest(test_utils.OpenStackBaseTest):
"""Base for mysql charm tests."""
@classmethod
def setUpClass(cls):
def setUpClass(cls, application_name=None):
"""Run class setup for running mysql tests."""
super(MySQLTest, cls).setUpClass()
super().setUpClass(application_name=application_name)
cls.application = "mysql"
cls.services = ["mysqld"]
class PerconaClusterTest(test_utils.OpenStackBaseTest):
"""Base for percona-cluster charm tests."""
@classmethod
def setUpClass(cls):
"""Run class setup for running percona-cluster tests."""
super(PerconaClusterTest, cls).setUpClass()
cls.application = "percona-cluster"
# This is the service pidof will attempt to find
# rather than what systemctl uses
cls.services = ["mysqld"]
cls.vip = os.environ.get("OS_VIP00")
cls.leader = None
cls.non_leaders = []
# Config file affected by juju set config change
cls.conf_file = "/etc/mysql/mysql.conf.d/mysqld.cnf"
def get_root_password(self):
"""Get the MySQL root password.
@@ -64,6 +54,198 @@ class PerconaClusterTest(test_utils.OpenStackBaseTest):
self.application,
"leader-get root-password")["Stdout"].strip()
def get_leaders_and_non_leaders(self):
    """Get leader node and non-leader nodes of percona.

    Update and set on the object the leader node and list of non-leader
    nodes.

    :returns: Tuple of (leader unit name, list of non-leader unit names)
    :rtype: Tuple[str, List[str]]
    """
    status = zaza.model.get_status().applications[self.application]
    # Reset
    self.leader = None
    self.non_leaders = []
    for unit in status["units"]:
        if status["units"][unit].get("leader"):
            self.leader = unit
        else:
            self.non_leaders.append(unit)
    return self.leader, self.non_leaders
def get_cluster_status(self):
    """Get cluster status.

    Return cluster status dict from the cluster-status action or raise
    assertion error.

    :returns: Dictionary of cluster status
    :rtype: dict
    :raises AssertionError: if the action returned no results or no
        'cluster-status' key.
    """
    logging.info("Running cluster-status action")
    action = zaza.model.run_action_on_leader(
        self.application,
        "cluster-status",
        action_params={})
    assert action.data.get("results") is not None, (
        "Cluster status action failed: No results: {}"
        .format(action.data))
    assert action.data["results"].get("cluster-status") is not None, (
        "Cluster status action failed: No cluster-status: {}"
        .format(action.data))
    # The action returns the status as a JSON document; decode to a dict.
    return json.loads(action.data["results"]["cluster-status"])
def get_rw_primary_node(self):
    """Get RW primary node.

    Return RW primary node unit.

    :returns: Unit object of primary node, or None when no unit address
        matches the primary IP reported by the cluster status.
    :rtype: Union[Unit, None]
    """
    _status = self.get_cluster_status()
    _primary_ip = _status['groupInformationSourceMember']
    # The primary may be reported as "ip:port"; keep only the IP part.
    if ":" in _primary_ip:
        _primary_ip = _primary_ip.split(':')[0]
    units = zaza.model.get_units(self.application_name)
    for unit in units:
        # NOTE(review): substring match on public_address — assumes the
        # primary IP cannot be a prefix of another unit's address; confirm.
        if _primary_ip in unit.public_address:
            return unit
def get_blocked_mysql_routers(self):
    """Get blocked mysql routers.

    :returns: List of blocked mysql-router unit names
    :rtype: List[str]
    """
    # Make sure mysql-router units are up to date
    # We cannot assume they are as there is up to a five minute delay
    mysql_router_units = []
    for application in self.get_applications_with_substring_in_name(
            "mysql-router"):
        for unit in zaza.model.get_units(application):
            mysql_router_units.append(unit.entity_id)
    self.run_update_status_hooks(mysql_router_units)
    # Get up to date status
    status = zaza.model.get_status().applications
    blocked_mysql_routers = []
    # Check if the units are blocked
    for application in self.get_applications_with_substring_in_name(
            "mysql-router"):
        # Subordinate dance with primary
        # There is no status[application]["units"] for subordinates;
        # walk the principal application's units and inspect their
        # subordinate entries instead.
        _subordinate_to = status[application].subordinate_to[0]
        for appunit in status[_subordinate_to].units:
            for subunit in (
                    status[_subordinate_to].
                    units[appunit].subordinates.keys()):
                if "blocked" in (
                        status[_subordinate_to].units[appunit].
                        subordinates[subunit].workload_status.status):
                    blocked_mysql_routers.append(subunit)
    return blocked_mysql_routers
def restart_blocked_mysql_routers(self):
    """Restart blocked mysql routers.

    :returns: None
    :rtype: None
    """
    # Check for blocked mysql-router units
    blocked_mysql_routers = self.get_blocked_mysql_routers()
    for unit in blocked_mysql_routers:
        logging.warning(
            "Restarting blocked mysql-router unit {}"
            .format(unit))
        # Unit ids look like "<application>/<n>"; rpartition("/")[0] yields
        # the application name, which is assumed to also be the systemd
        # service name — TODO confirm against the mysql-router charm.
        zaza.model.run_on_unit(
            unit,
            "systemctl restart {}".format(unit.rpartition("/")[0]))
class MySQLCommonTests(MySQLBaseTest):
    """Common mysql charm tests."""

    def test_110_mysqldump(self):
        """Backup mysql.

        Run the mysqldump action.
        """
        _db = "keystone"
        _file_key = "mysqldump-file"
        logging.info("Execute mysqldump action")
        # Need to change strict mode to be able to dump database
        if self.application_name == "percona-cluster":
            action = zaza.model.run_action_on_leader(
                self.application_name,
                "set-pxc-strict-mode",
                action_params={"mode": "MASTER"})
        action = zaza.model.run_action_on_leader(
            self.application,
            "mysqldump",
            action_params={"databases": _db})
        _results = action.data["results"]
        # The action reports the dump file path; it should name the database.
        assert _db in _results[_file_key], (
            "Mysqldump action failed: {}".format(action.data))
        logging.info("Passed mysqldump action test.")

    def test_910_restart_on_config_change(self):
        """Checking restart happens on config change.

        Change max connections and assert that change propagates to the
        correct file and that services are restarted as a result.
        """
        # Expected default and alternate values
        set_default = {"max-connections": "600"}
        set_alternate = {"max-connections": "1000"}
        # Make config change, check for service restarts
        logging.info("Setting max connections ...")
        self.restart_on_changed(
            self.conf_file,
            set_default,
            set_alternate,
            {}, {},
            self.services)
        logging.info("Passed restart on changed test.")

    def test_920_pause_resume(self):
        """Run pause and resume tests.

        Pause service and check services are stopped then resume and check
        they are started.
        """
        with self.pause_resume(self.services):
            logging.info("Testing pause resume")
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        # If there are any blocked mysql routers restart them.
        self.restart_blocked_mysql_routers()
        assert not self.get_blocked_mysql_routers(), (
            "Should no longer be blocked mysql-router units")
        logging.info("Passed pause and resume test.")
class PerconaClusterBaseTest(MySQLBaseTest):
"""Base for percona-cluster charm tests."""
@classmethod
def setUpClass(cls):
    """Run class setup for running percona-cluster tests."""
    super().setUpClass()
    cls.application = "percona-cluster"
    # This is the service pidof will attempt to find
    # rather than what systemctl uses
    cls.services = ["mysqld"]
    # VIP under test, injected via the TEST_VIP00 environment variable.
    cls.vip = os.environ.get("TEST_VIP00")
    # Config file affected by juju set config change
    cls.conf_file = "/etc/mysql/percona-xtradb-cluster.conf.d/mysqld.cnf"
def get_wsrep_value(self, attr):
"""Get wsrep value from the DB.
@@ -78,7 +260,7 @@ class PerconaClusterTest(test_utils.OpenStackBaseTest):
output = zaza.model.run_on_leader(
self.application, cmd)["Stdout"].strip()
value = re.search(r"^.+?\s+(.+)", output).group(1)
logging.debug("%s = %s" % (attr, value))
logging.info("%s = %s" % (attr, value))
return value
def is_pxc_bootstrapped(self):
@@ -116,7 +298,7 @@ class PerconaClusterTest(test_utils.OpenStackBaseTest):
cmd = "ip -br addr"
result = zaza.model.run_on_unit(unit.entity_id, cmd)
output = result.get("Stdout").strip()
logging.debug(output)
logging.info(output)
if self.vip in output:
logging.info("vip ({}) running in {}".format(
self.vip,
@@ -124,39 +306,13 @@ class PerconaClusterTest(test_utils.OpenStackBaseTest):
)
return unit.entity_id
def update_leaders_and_non_leaders(self):
"""Get leader node and non-leader nodes of percona.
Update and set on the object the leader node and list of non-leader
nodes.
:returns: None
:rtype: None
"""
status = zaza.model.get_status().applications[self.application]
# Reset
self.leader = None
self.non_leaders = []
for unit in status["units"]:
if status["units"][unit].get("leader"):
self.leader = unit
else:
self.non_leaders.append(unit)
class PerconaClusterCharmTests(PerconaClusterTest):
"""Base for percona-cluster charm tests.
class PerconaClusterCharmTests(MySQLCommonTests, PerconaClusterBaseTest):
"""Percona-cluster charm tests.
.. note:: these have tests have been ported from amulet tests
"""
@classmethod
def setUpClass(cls):
"""Run class setup for running percona-cluster tests."""
super(PerconaClusterTest, cls).setUpClass()
cls.application = "percona-cluster"
cls.services = ["mysqld"]
def test_100_bootstrapped_and_clustered(self):
"""Ensure PXC is bootstrapped and that peer units are clustered."""
self.units = zaza.model.get_application_config(
@@ -171,37 +327,9 @@ class PerconaClusterCharmTests(PerconaClusterTest):
" (wanted=%s, cluster_size=%s)" % (self.units, cluster_size))
assert cluster_size >= self.units, msg
def test_110_restart_on_config_change(self):
"""Checking restart happens on config change.
Change disk format and assert then change propagates to the correct
file and that services are restarted as a result
"""
# Expected default and alternate values
set_default = {"peer-timeout": "PT3S"}
set_alternate = {"peer-timeout": "PT15S"}
# Config file affected by juju set config change
conf_file = "/etc/mysql/percona-xtradb-cluster.conf.d/mysqld.cnf"
# Make config change, check for service restarts
logging.debug("Setting peer timeout ...")
self.restart_on_changed(
conf_file,
set_default,
set_alternate,
{}, {},
self.services)
logging.info("Passed restart on changed")
def test_120_pause_resume(self):
"""Run pause and resume tests.
Pause service and check services are stopped then resume and check
they are started
"""
with self.pause_resume(self.services):
logging.info("Testing pause resume")
logging.info("Ensuring PXC seeded file is present")
zaza.model.block_until_file_has_contents(self.application,
PXC_SEEDED_FILE, "done")
def test_130_change_root_password(self):
"""Change root password.
@@ -233,7 +361,7 @@ class PerconaClusterCharmTests(PerconaClusterTest):
assert code == "0", output
class PerconaClusterColdStartTest(PerconaClusterTest):
class PerconaClusterColdStartTest(PerconaClusterBaseTest):
"""Percona Cluster cold start tests."""
@classmethod
@@ -244,8 +372,6 @@ class PerconaClusterColdStartTest(PerconaClusterTest):
openstack_utils.get_undercloud_keystone_session())
cls.nova_client = openstack_utils.get_nova_session_client(
cls.overcloud_keystone_session)
cls.machines = (
juju_utils.get_machine_uuids_for_application(cls.application))
def resolve_update_status_errors(self):
"""Resolve update-status hooks error.
@@ -269,25 +395,26 @@ class PerconaClusterColdStartTest(PerconaClusterTest):
After bootstrapping a non-leader node, notify bootstrapped on the
leader node.
"""
_machines = sorted(
juju_utils.get_machine_uuids_for_application(self.application))
# Stop Nodes
self.machines.sort()
# Avoid hitting an update-status hook
logging.debug("Wait till model is idle ...")
logging.info("Wait till model is idle ...")
zaza.model.block_until_all_units_idle()
logging.info("Stopping instances: {}".format(self.machines))
for uuid in self.machines:
logging.info("Stopping instances: {}".format(_machines))
for uuid in _machines:
self.nova_client.servers.stop(uuid)
logging.debug("Wait till all machines are shutoff ...")
for uuid in self.machines:
logging.info("Wait till all machines are shutoff ...")
for uuid in _machines:
openstack_utils.resource_reaches_status(self.nova_client.servers,
uuid,
expected_status='SHUTOFF',
stop_after_attempt=16)
# Start nodes
self.machines.sort(reverse=True)
logging.info("Starting instances: {}".format(self.machines))
for uuid in self.machines:
_machines.sort(reverse=True)
logging.info("Starting instances: {}".format(_machines))
for uuid in _machines:
self.nova_client.servers.start(uuid)
for unit in zaza.model.get_units(self.application):
@@ -296,7 +423,7 @@ class PerconaClusterColdStartTest(PerconaClusterTest):
'unknown',
negate_match=True)
logging.debug("Wait till model is idle ...")
logging.info("Wait till model is idle ...")
# XXX If a hook was executing on a unit when it was powered off
# it comes back in an error state.
try:
@@ -305,7 +432,7 @@ class PerconaClusterColdStartTest(PerconaClusterTest):
self.resolve_update_status_errors()
zaza.model.block_until_all_units_idle()
logging.debug("Wait for application states ...")
logging.info("Wait for application states ...")
for unit in zaza.model.get_units(self.application):
try:
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
@@ -318,17 +445,17 @@ class PerconaClusterColdStartTest(PerconaClusterTest):
zaza.model.wait_for_application_states(states=states)
# Update which node is the leader and which are not
self.update_leaders_and_non_leaders()
_leader, _non_leaders = self.get_leaders_and_non_leaders()
# We want to test the worst possible scenario which is the
# non-leader with the highest sequence number. We will use the leader
# for the notify-bootstrapped after. They just need to be different
# units.
logging.info("Execute bootstrap-pxc action after cold boot ...")
zaza.model.run_action(
self.non_leaders[0],
_non_leaders[0],
"bootstrap-pxc",
action_params={})
logging.debug("Wait for application states ...")
logging.info("Wait for application states ...")
for unit in zaza.model.get_units(self.application):
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
states = {"percona-cluster": {
@@ -342,10 +469,10 @@ class PerconaClusterColdStartTest(PerconaClusterTest):
self.application,
"notify-bootstrapped",
action_params={})
logging.debug("Wait for application states ...")
logging.info("Wait for application states ...")
for unit in zaza.model.get_units(self.application):
zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
test_config = lifecycle_utils.get_charm_config()
test_config = lifecycle_utils.get_charm_config(fatal=False)
zaza.model.wait_for_application_states(
states=test_config.get("target_deploy_status", {}))
@@ -367,17 +494,9 @@ def retry_is_new_crm_master(test, old_crm_master):
return False
class PerconaClusterScaleTests(PerconaClusterTest):
class PerconaClusterScaleTests(PerconaClusterBaseTest):
"""Percona Cluster scale tests."""
@classmethod
def setUpClass(cls):
"""Run class setup for running percona scale tests.
.. note:: these have tests have been ported from amulet tests
"""
super(PerconaClusterScaleTests, cls).setUpClass()
def test_100_kill_crm_master(self):
"""Ensure VIP failover.
@@ -403,3 +522,511 @@ class PerconaClusterScaleTests(PerconaClusterTest):
# always true.
assert generic_utils.is_port_open("3306", self.vip), \
"Cannot connect to vip"
class MySQLInnoDBClusterTests(MySQLCommonTests):
    """Mysql-innodb-cluster charm tests.

    Note: The restart on changed and pause/resume tests also validate the
    changing of the R/W primary. On each mysqld shutdown a new R/W primary is
    elected automatically by MySQL.
    """

    @classmethod
    def setUpClass(cls):
        """Run class setup for running mysql-innodb-cluster tests."""
        super().setUpClass()
        cls.application = "mysql-innodb-cluster"

    def test_100_cluster_status(self):
        """Checking cluster status.

        Run the cluster-status action.
        """
        logging.info("Execute cluster-status action")
        cluster_status = self.get_cluster_status()
        assert "OK" in cluster_status["defaultReplicaSet"]["status"], (
            "Cluster status is not OK: {}"
            .format(cluster_status))
        logging.info("Passed cluster-status action test.")

    def test_120_set_cluster_option(self):
        """Set cluster option.

        Run the set-cluster-option action.
        """
        _key = "autoRejoinTries"
        _value = "500"
        logging.info("Set cluster option {}={}".format(_key, _value))
        action = zaza.model.run_action_on_leader(
            self.application,
            "set-cluster-option",
            action_params={"key": _key, "value": _value})
        assert "Success" in action.data["results"]["outcome"], (
            "Set cluster option {}={} action failed: {}"
            .format(_key, _value, action.data))
        logging.info("Passed set cluster option action test.")
class MySQLInnoDBClusterColdStartTest(MySQLBaseTest):
    """MySQL InnoDB Cluster cold start tests."""

    @classmethod
    def setUpClass(cls):
        """Run class setup for running mysql-innodb-cluster cold start tests."""
        super().setUpClass()
        cls.application = "mysql-innodb-cluster"
        # NOTE(review): attribute name says "overcloud" but the session is
        # built from the undercloud keystone — confirm intended naming.
        cls.overcloud_keystone_session = (
            openstack_utils.get_undercloud_keystone_session())
        cls.nova_client = openstack_utils.get_nova_session_client(
            cls.overcloud_keystone_session)

    def resolve_update_status_errors(self):
        """Resolve update-status hooks error.

        This should *only* be used after an instance hard reboot to handle the
        situation where a update-status hook was running when the unit was
        rebooted.
        """
        zaza.model.resolve_units(
            application_name=self.application,
            erred_hook='update-status',
            wait=True, timeout=180)

    def test_100_reboot_cluster_from_complete_outage(self):
        """Reboot cluster from complete outage.

        After a cold start, reboot cluster from complete outage.
        """
        _machines = sorted(
            juju_utils.get_machine_uuids_for_application(self.application))
        # Stop Nodes
        # Avoid hitting an update-status hook
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Stopping instances: {}".format(_machines))
        for uuid in _machines:
            self.nova_client.servers.stop(uuid)
        logging.info("Wait till all machines are shutoff ...")
        for uuid in _machines:
            openstack_utils.resource_reaches_status(self.nova_client.servers,
                                                    uuid,
                                                    expected_status='SHUTOFF',
                                                    stop_after_attempt=16)
        # Start nodes
        _machines.sort(reverse=True)
        logging.info("Starting instances: {}".format(_machines))
        for uuid in _machines:
            self.nova_client.servers.start(uuid)
        # NOTE(review): the log message below has a typo ('unkown') and,
        # because negate_match=True, this actually waits until the units are
        # NOT in the 'unknown' workload state.
        logging.info(
            "Wait till all {} units are in state 'unkown' ..."
            .format(self.application))
        for unit in zaza.model.get_units(self.application):
            zaza.model.block_until_unit_wl_status(
                unit.entity_id,
                'unknown',
                negate_match=True)
        logging.info("Wait till model is idle ...")
        # A hook that was executing when the machine was powered off leaves
        # the unit in an error state; resolve it and retry.
        try:
            zaza.model.block_until_all_units_idle()
        except zaza.model.UnitError:
            self.resolve_update_status_errors()
            zaza.model.block_until_all_units_idle()
        logging.info("Clear error hooks after reboot ...")
        for unit in zaza.model.get_units(self.application):
            try:
                zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
            except zaza.model.UnitError:
                self.resolve_update_status_errors()
                zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        logging.info(
            "Wait till all {} units are in state 'blocked' ..."
            .format(self.application))
        for unit in zaza.model.get_units(self.application):
            zaza.model.block_until_unit_wl_status(
                unit.entity_id,
                'blocked')
        # Wait until update-status hooks have completed
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Execute reboot-cluster-from-complete-outage "
                     "action after cold boot ...")
        # We do not know which unit has the most up to date data
        # run reboot-cluster-from-complete-outage until we get a success.
        for unit in zaza.model.get_units(self.application):
            action = zaza.model.run_action(
                unit.entity_id,
                "reboot-cluster-from-complete-outage",
                action_params={})
            if "Success" in action.data.get("results", {}).get("outcome", ""):
                break
            else:
                logging.info(action.data.get("results", {}).get("output", ""))
        # 'action' holds the last attempt; it must have succeeded.
        assert "Success" in action.data["results"]["outcome"], (
            "Reboot cluster from complete outage action failed: {}"
            .format(action.data))
        logging.info("Wait for application states ...")
        for unit in zaza.model.get_units(self.application):
            zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        test_config = lifecycle_utils.get_charm_config(fatal=False)
        zaza.model.wait_for_application_states(
            states=test_config.get("target_deploy_status", {}))
class MySQL8MigrationTests(MySQLBaseTest):
"""Percona Cluster to MySQL InnoDB Cluster Tests."""
@classmethod
def setUpClass(cls):
    """Run class setup for running migration tests."""
    # Having application_name set avoids breakage in the
    # OpenStackBaseTest class when running bundle tests without
    # charm_name specified
    super().setUpClass(application_name="mysql-innodb-cluster")
def test_999_migrate_percona_to_mysql(self):
"""Migrate DBs from percona-cluster to mysql-innodb-cluster.
Do not rely on self.application_name or other pre-set class values as
we will be pointing to both percona-cluster and mysql-innodb-cluster.
"""
# Map application name to db name
apps_to_dbs = {
"keystone": ["keystone"],
"glance": ["glance"],
"cinder": ["cinder"],
"nova-cloud-controller": ["nova", "nova_api", "nova_cell0"],
"neutron-api": ["neutron"],
"openstack-dashboard": ["horizon"],
"placement": ["placement"],
"vault": ["vault"]}
# TODO: This could do an automated check of what is actually deployed
dbs = [db for mapped_dbs in apps_to_dbs.values() for db in mapped_dbs]
percona_application = "percona-cluster"
mysql_application = "mysql-innodb-cluster"
percona_leader = zaza.model.get_unit_from_name(
zaza.model.get_lead_unit_name(percona_application))
mysql_leader = zaza.model.get_unit_from_name(
zaza.model.get_lead_unit_name(mysql_application))
logging.info("Remove percona-cluster:shared-db relations ...")
for app in apps_to_dbs.keys():
# Remove relations
zaza.model.remove_relation(
percona_application,
"{}:shared-db".format(percona_application),
"{}:shared-db".format(app))
logging.info("Wait till model is idle ...")
zaza.model.block_until_all_units_idle()
# Set PXC Strict Mode to MASTER
logging.info("Set PXC Strict Mode MASTER ...")
action = zaza.model.run_action_on_leader(
percona_application,
"set-pxc-strict-mode",
action_params={"mode": "MASTER"})
assert "failed" not in action.data["status"], (
"Set PXC Strict Mode MASTER action failed: {}"
.format(action.data))
# Dump the percona db
logging.info("mysqldump percona-cluster DBs ...")
action = zaza.model.run_action_on_leader(
percona_application,
"mysqldump",
action_params={
"databases": ",".join(dbs)})
assert "failed" not in action.data["status"], (
"mysqldump action failed: {}"
.format(action.data))
remote_file = action.data["results"]["mysqldump-file"]
remote_backup_dir = "/var/backups/mysql"
# Permissions for ubuntu user to read
logging.info("Set permissions to read percona-cluster:{} ..."
.format(remote_backup_dir))
zaza.model.run_on_leader(
percona_application,
"chmod 755 {}".format(remote_backup_dir))
# SCP back and forth
dump_file = "dump.sql.gz"
logging.info("SCP percona-cluster:{} to mysql-innodb-cluster:{} ..."
.format(remote_file, dump_file))
with tempfile.TemporaryDirectory() as tmpdirname:
tmp_file = "{}/{}".format(tmpdirname, dump_file)
zaza.model.scp_from_unit(
percona_leader.name,
remote_file,
tmp_file)
zaza.model.scp_to_unit(
mysql_leader.name,
tmp_file,
dump_file)
# Restore mysqldump to mysql-innodb-cluster
logging.info("restore-mysqldump DBs onto mysql-innodb-cluster ...")
action = zaza.model.run_action_on_leader(
mysql_application,
"restore-mysqldump",
action_params={
"dump-file": "/home/ubuntu/{}".format(dump_file)})
assert "failed" not in action.data["status"], (
"restore-mysqldump action failed: {}"
.format(action.data))
# Add db router relations
logging.info("Add mysql-router:shared-db relations ...")
for app in apps_to_dbs.keys():
# add relations
zaza.model.add_relation(
mysql_application,
"{}:shared-db".format(app),
"{}-mysql-router:shared-db".format(app))
# Set PXC Strict Mode back to ENFORCING
logging.info("Set PXC Strict Mode ENFORCING ...")
action = zaza.model.run_action_on_leader(
percona_application,
"set-pxc-strict-mode",
action_params={"mode": "ENFORCING"})
assert "failed" not in action.data["status"], (
"Set PXC Strict Mode ENFORCING action failed: {}"
.format(action.data))
logging.info("Wait for application states ...")
test_config = lifecycle_utils.get_charm_config(fatal=False)
zaza.model.wait_for_application_states(
states=test_config.get("target_deploy_status", {}))
class MySQLInnoDBClusterScaleTest(MySQLBaseTest):
    """MySQL InnoDB Cluster scale in/out tests.

    NOTE: the original docstring said "Percona Cluster cold start tests",
    but every test here scales the mysql-innodb-cluster application.
    """

    @classmethod
    def setUpClass(cls):
        """Run class setup for running mysql-innodb-cluster scale tests."""
        super().setUpClass()
        cls.application = "mysql-innodb-cluster"
        cls.test_config = lifecycle_utils.get_charm_config(fatal=False)
        cls.states = cls.test_config.get("target_deploy_status", {})

    def test_800_remove_leader(self):
        """Remove leader node.

        We start with a three node cluster, remove one, down to two.
        The cluster will be in waiting state.
        """
        logging.info("Scale in test: remove leader")
        leader, nons = self.get_leaders_and_non_leaders()
        leader_unit = zaza.model.get_unit_from_name(leader)
        # Wait until we are idle in the hopes clients are not running
        # update-status hooks
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        zaza.model.destroy_unit(self.application_name, leader)
        logging.info("Wait until all only 2 units ...")
        zaza.model.block_until_unit_count(self.application, 2)
        logging.info("Wait until all units are cluster incomplete ...")
        zaza.model.block_until_wl_status_info_starts_with(
            self.application, "'cluster' incomplete")
        # Show status
        logging.info(self.get_cluster_status())
        # Destroying the juju unit does not evict the instance from cluster
        # metadata; remove-instance (run on a surviving unit) does that.
        logging.info(
            "Removing old unit from cluster: {} "
            .format(leader_unit.public_address))
        action = zaza.model.run_action(
            nons[0],
            "remove-instance",
            action_params={
                "address": leader_unit.public_address,
                "force": True})
        assert action.data.get("results") is not None, (
            "Remove instance action failed: No results: {}"
            .format(action.data))

    def test_801_add_unit(self):
        """Add mysql-innodb-cluster node.

        We start with two node cluster in waiting, add one, back to a full
        cluster of three.
        """
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Adding unit after removed unit ...")
        zaza.model.add_unit(self.application_name)
        logging.info("Wait until 3 units ...")
        zaza.model.block_until_unit_count(self.application, 3)
        logging.info("Wait for application states ...")
        zaza.model.wait_for_application_states(states=self.states)

    def test_802_add_unit(self):
        """Add another mysql-innodb-cluster node.

        We start with a three node full cluster, add another, up to a four node
        cluster.
        """
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Adding unit after full cluster ...")
        zaza.model.add_unit(self.application_name)
        logging.info("Wait until 4 units ...")
        zaza.model.block_until_unit_count(self.application, 4)
        logging.info("Wait for application states ...")
        zaza.model.wait_for_application_states(states=self.states)

    def test_803_remove_fourth(self):
        """Remove mysql-innodb-cluster node.

        We start with a four node full cluster, remove one, down to a three
        node full cluster.
        """
        leader, nons = self.get_leaders_and_non_leaders()
        non_leader_unit = zaza.model.get_unit_from_name(nons[0])
        # Wait until we are idle in the hopes clients are not running
        # update-status hooks
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        zaza.model.destroy_unit(self.application_name, nons[0])
        logging.info("Scale in test: back down to three")
        logging.info("Wait until 3 units ...")
        zaza.model.block_until_unit_count(self.application, 3)
        logging.info("Wait for status ready ...")
        zaza.model.wait_for_application_states(states=self.states)
        # Show status
        logging.info(self.get_cluster_status())
        # As in test_800: evict the destroyed unit from cluster metadata.
        logging.info(
            "Removing old unit from cluster: {} "
            .format(non_leader_unit.public_address))
        action = zaza.model.run_action(
            leader,
            "remove-instance",
            action_params={
                "address": non_leader_unit.public_address,
                "force": True})
        assert action.data.get("results") is not None, (
            "Remove instance action failed: No results: {}"
            .format(action.data))
class MySQLInnoDBClusterPartitionTest(MySQLBaseTest):
    """MySQL partition handling.

    Simulates a network partition between cluster members with iptables,
    then recovers quorum with force-quorum-using-partition-of.
    """

    def test_850_force_quorum_using_partition_of(self):
        """Force quorum using partition of instance with given address.

        After outage, cluster can end up without quorum. Force it.
        """
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        # Block all traffic across mysql instances: 0<-1, 1<-2 and 2<-0
        # (get_units already returns a list; no copy needed)
        mysql_units = zaza.model.get_units(self.application)
        no_of_units = len(mysql_units)
        for index, unit in enumerate(mysql_units):
            next_unit = mysql_units[(index + 1) % no_of_units]
            ip_address = next_unit.public_address
            cmd = "sudo iptables -A INPUT -s {} -j DROP".format(ip_address)
            # BUGFIX: was zaza.model.async_run_on_unit(unit, cmd) — calling
            # the coroutine function from this synchronous test never runs
            # it (the coroutine is discarded unawaited), and it was passed a
            # Unit object where a unit name is expected. Use the sync
            # wrapper with the unit name, as the rest of this module does.
            zaza.model.run_on_unit(unit.entity_id, cmd)
        logging.info(
            "Wait till all {} units are in state 'blocked' ..."
            .format(self.application))
        for unit in zaza.model.get_units(self.application):
            # NOTE(review): negate_match=True waits until the unit is *not*
            # 'blocked', which contradicts the log message above — confirm
            # which of the two is intended before relying on this wait.
            zaza.model.block_until_unit_wl_status(
                unit.entity_id,
                'blocked',
                negate_match=True)
        logging.info("Wait till model is idle ...")
        zaza.model.block_until_all_units_idle()
        logging.info("Execute force-quorum-using-partition-of action ...")
        # Select "quorum leader" unit
        leader_unit = mysql_units[0]
        action = zaza.model.run_action(
            leader_unit.entity_id,
            "force-quorum-using-partition-of",
            action_params={
                "address": leader_unit.public_address,
                'i-really-mean-it': True
            })
        assert action.data.get("results") is not None, (
            "Force quorum using partition of action failed: {}"
            .format(action.data))
        logging.debug(
            "Results from running 'force-quorum' command ...\n{}".format(
                action.data))
        logging.info("Wait till model is idle ...")
        try:
            zaza.model.block_until_all_units_idle()
        except zaza.model.UnitError:
            # Units may have errored update-status hooks after the
            # partition; resolve and retry the idle wait once.
            self.resolve_update_status_errors()
            zaza.model.block_until_all_units_idle()
        # Unblock all traffic across mysql instances
        for unit in zaza.model.get_units(self.application):
            cmd = "sudo iptables -F"
            # BUGFIX: same unawaited-coroutine issue as above.
            zaza.model.run_on_unit(unit.entity_id, cmd)
        logging.info("Wait for application states ...")
        for unit in zaza.model.get_units(self.application):
            # Kick update-status so workload status refreshes promptly.
            zaza.model.run_on_unit(unit.entity_id, "hooks/update-status")
        test_config = lifecycle_utils.get_charm_config(fatal=False)
        zaza.model.wait_for_application_states(
            states=test_config.get("target_deploy_status", {}))
class MySQLRouterTests(test_utils.OpenStackBaseTest):
    """MySQL Router Tests."""

    @classmethod
    def setUpClass(cls, application_name="keystone-mysql-router"):
        """Run class setup for running mysql-router tests.

        :param application_name: mysql-router application under test
        :type application_name: str
        """
        super().setUpClass(application_name=application_name)
        cls.application = application_name
        # Service expected to restart when configuration changes.
        cls.services = ["mysqlrouter"]
        # Config file affected by juju set config change
        cls.conf_file = (
            "/var/lib/mysql/{}-mysql-router/mysqlrouter.conf"
            .format(application_name))

    def test_910_restart_on_config_change(self):
        """Checking restart happens on config change.

        Change the ttl charm option and assert that the change propagates
        to the correct file and that services are restarted as a result.
        (The original docstring mentioned "max connections", but this test
        only toggles ttl.)
        """
        # Expected default and alternate values
        set_default = {"ttl": ".5"}
        set_alternate = {"ttl": "7"}

        # Make config change, check for service restarts
        logging.info("Setting TTL ...")
        self.restart_on_changed(
            self.conf_file,
            set_default,
            set_alternate,
            {}, {},
            self.services)
        logging.info("Passed restart on changed test.")

View File

@@ -0,0 +1,34 @@
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module of functions for interfacing with the percona-cluster charm."""
import zaza.model as model
async def complete_cluster_series_upgrade():
    """Run the complete-cluster-series-upgrade action on the lead unit.

    Some models name the application 'mysql' while others name it
    'percona-cluster'. Attempt 'mysql' first; if that application does not
    exist (KeyError), fall back to 'percona-cluster'.
    """
    action_name = 'complete-cluster-series-upgrade'
    try:
        await model.async_run_action_on_leader(
            'mysql',
            action_name,
            action_params={})
    except KeyError:
        # 'mysql' not deployed in this model; use the alternate name.
        await model.async_run_action_on_leader(
            'percona-cluster',
            action_name,
            action_params={})

View File

@@ -1,5 +1,3 @@
#!/usr/bin/env python3
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,6 +14,9 @@
"""Setup for Neutron deployments."""
import functools
import logging
from zaza.openstack.configure import (
network,
)
@@ -25,7 +26,8 @@ from zaza.openstack.utilities import (
juju as juju_utils,
openstack as openstack_utils,
)
import zaza.model as model
import zaza.charm_lifecycle.utils as lifecycle_utils
# The overcloud network configuration settings are declared.
@@ -57,12 +59,14 @@ DEFAULT_UNDERCLOUD_NETWORK_CONFIG = {
}
def basic_overcloud_network():
def basic_overcloud_network(limit_gws=None):
"""Run setup for neutron networking.
Configure the following:
The overcloud network using subnet pools
:param limit_gws: Limit the number of gateways that get a port attached
:type limit_gws: int
"""
cli_utils.setup_logging()
@@ -74,19 +78,49 @@ def basic_overcloud_network():
network_config.update(DEFAULT_UNDERCLOUD_NETWORK_CONFIG)
# Environment specific settings
network_config.update(generic_utils.get_undercloud_env_vars())
# Deployed model settings
if (model.get_application_config('neutron-api')
.get('enable-dvr').get('value')):
network_config.update({"dvr_enabled": True})
# Get keystone session
keystone_session = openstack_utils.get_overcloud_keystone_session()
# Handle network for Openstack-on-Openstack scenarios
if juju_utils.get_provider_type() == "openstack":
# Get optional use_juju_wait for network option
options = (lifecycle_utils
.get_charm_config(fatal=False)
.get('configure_options', {}))
use_juju_wait = options.get(
'configure_gateway_ext_port_use_juju_wait', True)
# Handle network for OpenStack-on-OpenStack scenarios
provider_type = juju_utils.get_provider_type()
if provider_type == "openstack":
undercloud_ks_sess = openstack_utils.get_undercloud_keystone_session()
network.setup_gateway_ext_port(network_config,
keystone_session=undercloud_ks_sess)
keystone_session=undercloud_ks_sess,
limit_gws=limit_gws,
use_juju_wait=use_juju_wait)
elif provider_type == "maas":
# NOTE(fnordahl): After validation of the MAAS+Netplan Open vSwitch
# integration support, we would most likely want to add multiple modes
# of operation with MAAS.
#
# Perform charm based OVS configuration
openstack_utils.configure_charmed_openstack_on_maas(
network_config, limit_gws=limit_gws)
else:
logging.warning('Unknown Juju provider type, "{}", will not perform'
' charm network configuration.'
.format(provider_type))
# Configure the overcloud network
network.setup_sdn(network_config, keystone_session=keystone_session)
# Partial application of basic_overcloud_network: attach an external
# network port to exactly one gateway.
overcloud_network_one_gw = functools.partial(
    basic_overcloud_network,
    limit_gws=1)

# Partial application of basic_overcloud_network: attach external
# network ports to exactly two gateways.
overcloud_network_two_gws = functools.partial(
    basic_overcloud_network,
    limit_gws=2)

Some files were not shown because too many files have changed in this diff Show More