diff --git a/.github/workflows/tox.yaml b/.github/workflows/tox.yaml new file mode 100644 index 0000000..754ce29 --- /dev/null +++ b/.github/workflows/tox.yaml @@ -0,0 +1,27 @@ +name: Python package + +on: + - push + - pull_request + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: [3.5, 3.6, 3.7, 3.8, 3.9] + + steps: + - uses: actions/checkout@v1 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install tox tox-gh-actions + - name: Lint with tox + run: tox -e pep8 + - name: Test with tox + run: tox -e py${{ matrix.python-version }} \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index c1558f3..69d0fed 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,9 @@ sudo: true dist: xenial language: python -install: pip install tox-travis +install: + - pip install tox-travis + - pip install codecov matrix: include: - name: "Python 3.5" @@ -13,5 +15,10 @@ matrix: - name: "Python 3.7" python: 3.7 env: ENV=pep8,py3 + - name: "Python 3.8" + python: 3.7 + env: ENV=pep8,py3 script: - tox -c tox.ini -e $ENV +after_success: + - codecov --verbose --gcov-glob unit_tests/* \ No newline at end of file diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..96aeb92 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +recursive-include zaza/openstack *.j2 diff --git a/README.md b/README.md index 87f6ad6..78cc9cb 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ charm_name: pacemaker-remote tests: - zaza.openstack.charm_tests.pacemaker_remote.tests.PacemakerRemoteTest configure: - - zaza.openstack.charm_tests.noop.setup.basic_setup + - zaza.charm_tests.noop.setup.basic_setup gate_bundles: - basic smoke_bundles: @@ -23,4 +23,4 @@ test-requirements.txt: ``` git+https://github.com/openstack-charmers/zaza.git#egg=zaza git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack -``` \ No newline at end of file +``` diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000..d583fd7 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,2 @@ +ignore: + - "zaza/openstack/charm_tests/**/*tests.py" diff --git a/requirements.txt b/requirements.txt index 1ee936a..adec5bc 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,14 @@ - +# pin lxml < 4.6.3 for py35 as no wheels exist for 4.6.3 (deprecated platform) +# This is necessary for Xenial builders +# BUG: https://github.com/openstack-charmers/zaza-openstack-tests/issues/530 +lxml<4.6.3 aiounittest async_generator -juju +boto3 +juju!=2.8.3 # blacklist 2.8.3 as it appears to have a connection bug juju_wait -PyYAML<=4.2,>=3.0 -flake8>=2.2.4,<=3.5.0 +PyYAML<=4.2,>=3.0 +flake8>=2.2.4 flake8-docstrings flake8-per-file-ignores pydocstyle<4.0.0 @@ -21,12 +25,17 @@ dnspython>=1.12.0 psutil>=1.1.1,<2.0.0 python-openstackclient>=3.14.0 aodhclient +gnocchiclient>=7.0.5,<8.0.0 +pika>=1.1.0,<2.0.0 +python-barbicanclient python-designateclient python-ceilometerclient python-cinderclient python-glanceclient python-heatclient +python-ironicclient python-keystoneclient +python-manilaclient python-neutronclient python-novaclient python-octaviaclient @@ -34,7 +43,13 @@ python-swiftclient tenacity distro-info paramiko + # Documentation requirements sphinx sphinxcontrib-asyncio git+https://github.com/openstack-charmers/zaza#egg=zaza + +# Newer versions require a Rust compiler to build, see +# * https://github.com/openstack-charmers/zaza/issues/421 +# * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html +cryptography<3.4 diff --git a/setup.py b/setup.py index 0980b96..fb4377d 100644 --- a/setup.py +++ b/setup.py @@ -25,22 +25,40 @@ from setuptools.command.test import test as TestCommand version = "0.0.1.dev1" install_require = [ + 'futurist<2.0.0', 'async_generator', - 'cryptography', + 'boto3', + + # Newer versions require a Rust compiler to build, see + # * https://github.com/openstack-charmers/zaza/issues/421 + # * https://mail.python.org/pipermail/cryptography-dev/2021-January/001003.html + 'cryptography<3.4', + + 'dnspython', 'hvac<0.7.0', 'jinja2', 'juju', 'juju-wait', + 'lxml', 'PyYAML', 'tenacity', - 'oslo.config', - 'python-glanceclient', - 'python-keystoneclient', - 'python-novaclient', - 'python-neutronclient', - 'python-octaviaclient', - 'python-cinderclient', - 'python-swiftclient', + 'oslo.config<6.12.0', + 'aodhclient<1.4.0', + 'gnocchiclient>=7.0.5,<8.0.0', + 'pika>=1.1.0,<2.0.0', + 'python-barbicanclient>=4.0.1,<5.0.0', + 'python-designateclient>=1.5,<3.0.0', + 'python-heatclient<2.0.0', + 'python-ironicclient', + 'python-glanceclient<3.0.0', + 'python-keystoneclient<3.22.0', + 'python-manilaclient<2.0.0', + 'python-novaclient<16.0.0', + 'python-neutronclient<7.0.0', + 'python-octaviaclient<1.11.0', + 'python-ceilometerclient', + 'python-cinderclient<6.0.0', + 'python-swiftclient<3.9.0', 'zaza@git+https://github.com/openstack-charmers/zaza.git#egg=zaza', ] @@ -96,10 +114,11 @@ setup( license='Apache-2.0: http://www.apache.org/licenses/LICENSE-2.0', packages=find_packages(exclude=["unit_tests"]), zip_safe=False, + include_package_data=True, cmdclass={'test': Tox}, install_requires=install_require, extras_require={ 'testing': tests_require, }, tests_require=tests_require, -) \ No newline at end of file +) diff --git a/tests/README.md b/tests/README.md deleted file mode 100644 index e5095d0..0000000 --- a/tests/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This directory contains functional test definition for functional test of Zaza -itself. diff --git a/tests/bundles/magpie.yaml b/tests/bundles/magpie.yaml deleted file mode 100644 index eb065fd..0000000 --- a/tests/bundles/magpie.yaml +++ /dev/null @@ -1,5 +0,0 @@ -series: bionic -applications: - magpie: - charm: cs:~admcleod/magpie - num_units: 2 diff --git a/tests/bundles/overlays/local-charm-overlay.yaml.j2 b/tests/bundles/overlays/local-charm-overlay.yaml.j2 deleted file mode 100644 index 185df30..0000000 --- a/tests/bundles/overlays/local-charm-overlay.yaml.j2 +++ /dev/null @@ -1 +0,0 @@ -comment: this bundle overlay intentionally left blank diff --git a/tests/tests.yaml b/tests/tests.yaml deleted file mode 100644 index 73f9e9a..0000000 --- a/tests/tests.yaml +++ /dev/null @@ -1,11 +0,0 @@ -charm_name: none -gate_bundles: -- magpie -target_deploy_status: - magpie: - workload-status: active - workload-status-message: icmp ok, local hostname ok, dns ok -configure: -- zaza.openstack.charm_tests.noop.setup.basic_setup -tests: -- zaza.openstack.charm_tests.noop.tests.NoopTest diff --git a/tox.ini b/tox.ini index 2447271..aea173f 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,22 @@ [tox] envlist = pep8, py3 skipsdist = True +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. +skip_missing_interpreters = False +# NOTES: +# * We avoid the new dependency resolver by pinning pip < 20.3, see +# https://github.com/pypa/pip/issues/9187 +# * Pinning dependencies requires tox >= 3.2.0, see +# https://tox.readthedocs.io/en/latest/config.html#conf-requires +# * It is also necessary to pin virtualenv as a newer virtualenv would still +# lead to fetching the latest pip in the func* tox targets, see +# https://stackoverflow.com/a/38133283 +requires = pip < 20.3 + virtualenv < 20.0 +# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci +minversion = 3.2.0 [testenv] setenv = VIRTUAL_ENV={envdir} @@ -8,12 +24,32 @@ setenv = VIRTUAL_ENV={envdir} install_command = pip install {opts} {packages} -commands = nosetests --with-coverage --cover-package=zaza {posargs} {toxinidir}/unit_tests +commands = nosetests --with-coverage --cover-package=zaza.openstack {posargs} {toxinidir}/unit_tests [testenv:py3] basepython = python3 deps = -r{toxinidir}/requirements.txt +[testenv:py3.5] +basepython = python3.5 +deps = -r{toxinidir}/requirements.txt + +[testenv:py3.6] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + +[testenv:py3.7] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + +[testenv:py3.8] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + +[testenv:py3.9] +basepython = python3.9 +deps = -r{toxinidir}/requirements.txt + [testenv:pep8] basepython = python3 deps = -r{toxinidir}/requirements.txt @@ -25,7 +61,7 @@ deps = -r{toxinidir}/requirements.txt commands = /bin/true [flake8] -ignore = E402,E226 +ignore = E402,E226,W504 per-file-ignores = unit_tests/**: D @@ -34,4 +70,4 @@ basepython = python3 changedir = doc/source deps = -r{toxinidir}/requirements.txt -commands = sphinx-build -W -b html -d {toxinidir}/doc/build/doctrees . {toxinidir}/doc/build/html \ No newline at end of file +commands = sphinx-build -W -b html -d {toxinidir}/doc/build/doctrees . {toxinidir}/doc/build/html diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py index 8203d13..03c4879 100644 --- a/unit_tests/__init__.py +++ b/unit_tests/__init__.py @@ -11,3 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import sys +import unittest.mock as mock + +sys.modules['zaza.utilities.maas'] = mock.MagicMock() diff --git a/unit_tests/charm_tests/__init__.py b/unit_tests/charm_tests/__init__.py new file mode 100644 index 0000000..6131624 --- /dev/null +++ b/unit_tests/charm_tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/unit_tests/charm_tests/test_mysql.py b/unit_tests/charm_tests/test_mysql.py new file mode 100644 index 0000000..e38460b --- /dev/null +++ b/unit_tests/charm_tests/test_mysql.py @@ -0,0 +1,40 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import mock +import unittest +import sys + +import zaza.openstack.charm_tests.mysql.utils as mysql_utils + + +class TestMysqlUtils(unittest.TestCase): + """Test class to encapsulate testing Mysql test utils.""" + + def setUp(self): + super(TestMysqlUtils, self).setUp() + if sys.version_info < (3, 6, 0): + raise unittest.SkipTest("Can't AsyncMock in py35") + + @mock.patch.object(mysql_utils, 'model') + def test_mysql_complete_cluster_series_upgrade(self, mock_model): + run_action_on_leader = mock.AsyncMock() + mock_model.async_run_action_on_leader = run_action_on_leader + asyncio.get_event_loop().run_until_complete( + mysql_utils.complete_cluster_series_upgrade()) + run_action_on_leader.assert_called_once_with( + 'mysql', + 'complete-cluster-series-upgrade', + action_params={}) diff --git a/unit_tests/charm_tests/test_rabbitmq_server.py b/unit_tests/charm_tests/test_rabbitmq_server.py new file mode 100644 index 0000000..e092122 --- /dev/null +++ b/unit_tests/charm_tests/test_rabbitmq_server.py @@ -0,0 +1,40 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import asyncio +import mock +import unittest +import sys + +import zaza.openstack.charm_tests.rabbitmq_server.utils as rabbit_utils + + +class TestRabbitUtils(unittest.TestCase): + """Test class to encapsulate testing Mysql test utils.""" + + def setUp(self): + super(TestRabbitUtils, self).setUp() + if sys.version_info < (3, 6, 0): + raise unittest.SkipTest("Can't AsyncMock in py35") + + @mock.patch.object(rabbit_utils.zaza, 'model') + def test_rabbit_complete_cluster_series_upgrade(self, mock_model): + run_action_on_leader = mock.AsyncMock() + mock_model.async_run_action_on_leader = run_action_on_leader + asyncio.get_event_loop().run_until_complete( + rabbit_utils.complete_cluster_series_upgrade()) + run_action_on_leader.assert_called_once_with( + 'rabbitmq-server', + 'complete-cluster-series-upgrade', + action_params={}) diff --git a/unit_tests/charm_tests/test_tempest.py b/unit_tests/charm_tests/test_tempest.py new file mode 100644 index 0000000..3c4c161 --- /dev/null +++ b/unit_tests/charm_tests/test_tempest.py @@ -0,0 +1,66 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +import unittest + +import zaza.openstack.charm_tests.tempest.setup as tempest_setup + + +class TestTempestSetup(unittest.TestCase): + """Test class to encapsulate testing Mysql test utils.""" + + def setUp(self): + super(TestTempestSetup, self).setUp() + + def test_add_environment_var_config_with_missing_variable(self): + ctxt = {} + with self.assertRaises(Exception) as context: + tempest_setup.add_environment_var_config(ctxt, ['swift']) + self.assertEqual( + ('Environment variables [TEST_SWIFT_IP] must all be ' + 'set to run this test'), + str(context.exception)) + + @mock.patch.object(tempest_setup.deployment_env, 'get_deployment_context') + def test_add_environment_var_config_with_all_variables( + self, + get_deployment_context): + ctxt = {} + get_deployment_context.return_value = { + 'TEST_GATEWAY': 'test', + 'TEST_CIDR_EXT': 'test', + 'TEST_FIP_RANGE': 'test', + 'TEST_NAME_SERVER': 'test', + 'TEST_CIDR_PRIV': 'test', + } + tempest_setup.add_environment_var_config(ctxt, ['neutron']) + self.assertEqual(ctxt['test_gateway'], 'test') + + @mock.patch.object(tempest_setup.deployment_env, 'get_deployment_context') + def test_add_environment_var_config_with_some_variables( + self, + get_deployment_context): + ctxt = {} + get_deployment_context.return_value = { + 'TEST_GATEWAY': 'test', + 'TEST_NAME_SERVER': 'test', + 'TEST_CIDR_PRIV': 'test', + } + with self.assertRaises(Exception) as context: + tempest_setup.add_environment_var_config(ctxt, ['neutron']) + self.assertEqual( + ('Environment variables [TEST_CIDR_EXT, TEST_FIP_RANGE] must ' + 'all be set to run this test'), + str(context.exception)) diff --git a/unit_tests/charm_tests/test_utils.py b/unit_tests/charm_tests/test_utils.py new file mode 100644 index 0000000..c1e8166 --- /dev/null +++ b/unit_tests/charm_tests/test_utils.py @@ -0,0 +1,201 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from unittest import mock + +import zaza.openstack.charm_tests.test_utils as test_utils + +import unit_tests.utils as ut_utils + + +class TestBaseCharmTest(ut_utils.BaseTestCase): + + def setUp(self): + super(TestBaseCharmTest, self).setUp() + self.target = test_utils.BaseCharmTest() + + def patch_target(self, attr, return_value=None): + mocked = mock.patch.object(self.target, attr) + self._patches[attr] = mocked + started = mocked.start() + started.return_value = return_value + self._patches_start[attr] = started + setattr(self, attr, started) + + def test_get_my_tests_options(self): + + class FakeTest(test_utils.BaseCharmTest): + + def method(self, test_config): + self.test_config = test_config + return self.get_my_tests_options('aKey', 'aDefault') + + f = FakeTest() + self.assertEquals(f.method({}), 'aDefault') + self.assertEquals(f.method({ + 'tests_options': { + 'unit_tests.charm_tests.test_utils.' + 'FakeTest.method.aKey': 'aValue', + }, + }), 'aValue') + + def test_config_change(self): + default_config = {'fakeKey': 'testProvidedDefault'} + alterna_config = {'fakeKey': 'testProvidedAlterna'} + self.target.model_name = 'aModel' + self.target.test_config = {} + self.patch_target('config_current') + self.config_current.return_value = default_config + self.patch_object(test_utils.model, 'set_application_config') + self.patch_object(test_utils.model, 'wait_for_agent_status') + self.patch_object(test_utils.model, 'wait_for_application_states') + self.patch_object(test_utils.model, 'block_until_all_units_idle') + with self.target.config_change( + default_config, alterna_config, application_name='anApp'): + self.set_application_config.assert_called_once_with( + 'anApp', alterna_config, model_name='aModel') + self.wait_for_agent_status.assert_called_once_with( + model_name='aModel') + self.wait_for_application_states.assert_called_once_with( + model_name='aModel', states={}) + self.block_until_all_units_idle.assert_called_once_with() + # after yield we will have different calls than the above, measure both + self.set_application_config.assert_has_calls([ + mock.call('anApp', alterna_config, model_name='aModel'), + mock.call('anApp', default_config, model_name='aModel'), + ]) + self.wait_for_application_states.assert_has_calls([ + mock.call(model_name='aModel', states={}), + mock.call(model_name='aModel', states={}), + ]) + self.block_until_all_units_idle.assert_has_calls([ + mock.call(), + mock.call(), + ]) + # confirm operation with `reset_to_charm_default` + self.set_application_config.reset_mock() + self.wait_for_agent_status.reset_mock() + self.wait_for_application_states.reset_mock() + self.patch_object(test_utils.model, 'reset_application_config') + with self.target.config_change( + default_config, alterna_config, application_name='anApp', + reset_to_charm_default=True): + self.set_application_config.assert_called_once_with( + 'anApp', alterna_config, model_name='aModel') + # we want to assert this not to be called after yield + self.set_application_config.reset_mock() + self.assertFalse(self.set_application_config.called) + self.reset_application_config.assert_called_once_with( + 'anApp', list(alterna_config.keys()), model_name='aModel') + self.wait_for_application_states.assert_has_calls([ + mock.call(model_name='aModel', states={}), + mock.call(model_name='aModel', states={}), + ]) + self.block_until_all_units_idle.assert_has_calls([ + mock.call(), + mock.call(), + ]) + # confirm operation where both default and alternate config passed in + # are the same. This is used to set config and not change it back. + self.set_application_config.reset_mock() + self.wait_for_agent_status.reset_mock() + self.wait_for_application_states.reset_mock() + self.reset_application_config.reset_mock() + with self.target.config_change( + alterna_config, alterna_config, application_name='anApp'): + self.set_application_config.assert_called_once_with( + 'anApp', alterna_config, model_name='aModel') + # we want to assert these not to be called after yield + self.set_application_config.reset_mock() + self.wait_for_agent_status.reset_mock() + self.wait_for_application_states.reset_mock() + self.assertFalse(self.set_application_config.called) + self.assertFalse(self.reset_application_config.called) + self.assertFalse(self.wait_for_agent_status.called) + self.assertFalse(self.wait_for_application_states.called) + + def test_separate_non_string_config(self): + intended_cfg_keys = ['foo2', 'foo3', 'foo4', 'foo5'] + current_config_mock = { + 'foo2': None, + 'foo3': 'old_bar3', + 'foo4': None, + 'foo5': 'old_bar5', + } + self.patch_target('config_current') + self.config_current.return_value = current_config_mock + non_string_type_keys = ['foo2', 'foo3', 'foo4'] + expected_result_filtered = { + 'foo3': 'old_bar3', + 'foo5': 'old_bar5', + } + expected_result_special = { + 'foo2': None, + 'foo4': None, + } + current, non_string = ( + self.target.config_current_separate_non_string_type_keys( + non_string_type_keys, intended_cfg_keys, 'application_name') + ) + + self.assertEqual(expected_result_filtered, current) + self.assertEqual(expected_result_special, non_string) + + self.config_current.assert_called_once_with( + 'application_name', intended_cfg_keys) + + def test_separate_special_config_None_params(self): + current_config_mock = { + 'foo1': 'old_bar1', + 'foo2': None, + 'foo3': 'old_bar3', + 'foo4': None, + 'foo5': 'old_bar5', + } + self.patch_target('config_current') + self.config_current.return_value = current_config_mock + non_string_type_keys = ['foo2', 'foo3', 'foo4'] + expected_result_filtered = { + 'foo1': 'old_bar1', + 'foo3': 'old_bar3', + 'foo5': 'old_bar5', + } + expected_result_special = { + 'foo2': None, + 'foo4': None, + } + current, non_string = ( + self.target.config_current_separate_non_string_type_keys( + non_string_type_keys) + ) + + self.assertEqual(expected_result_filtered, current) + self.assertEqual(expected_result_special, non_string) + + self.config_current.assert_called_once_with(None, None) + + +class TestOpenStackBaseTest(ut_utils.BaseTestCase): + + def test_setUpClass(self): + self.patch_object(test_utils.openstack_utils, 'get_cacert') + self.patch_object(test_utils.openstack_utils, + 'get_overcloud_keystone_session') + self.patch_object(test_utils.BaseCharmTest, 'setUpClass') + + class MyTestClass(test_utils.OpenStackBaseTest): + model_name = 'deadbeef' + + MyTestClass.setUpClass('foo', 'bar') + self.setUpClass.assert_called_with('foo', 'bar') diff --git a/unit_tests/utilities/swift_test_data.py b/unit_tests/utilities/swift_test_data.py new file mode 100644 index 0000000..ac3145a --- /dev/null +++ b/unit_tests/utilities/swift_test_data.py @@ -0,0 +1,69 @@ +# flake8: noqa + +SWIFT_GET_NODES_STDOUT = """ +Account 23934cb1850c4d28b1ca113a24c0e46b +Container zaza-swift-gr-tests-f3129278-container +Object zaza_test_object.txt + + +Partition 146 +Hash 928c2f8006efeeb4b1164f4cce035887 + +Server:Port Device 10.5.0.38:6000 loop0 +Server:Port Device 10.5.0.4:6000 loop0 +Server:Port Device 10.5.0.9:6000 loop0 [Handoff] +Server:Port Device 10.5.0.34:6000 loop0 [Handoff] +Server:Port Device 10.5.0.15:6000 loop0 [Handoff] +Server:Port Device 10.5.0.18:6000 loop0 [Handoff] + + +curl -g -I -XHEAD "http://10.5.0.38:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt" +curl -g -I -XHEAD "http://10.5.0.4:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt" +curl -g -I -XHEAD "http://10.5.0.9:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt" # [Handoff] +curl -g -I -XHEAD "http://10.5.0.34:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt" # [Handoff] +curl -g -I -XHEAD "http://10.5.0.15:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt" # [Handoff] +curl -g -I -XHEAD "http://10.5.0.18:6000/loop0/146/23934cb1850c4d28b1ca113a24c0e46b/zaza-swift-gr-tests-f3129278-container/zaza_test_object.txt" # [Handoff] + + +Use your own device location of servers: +such as "export DEVICE=/srv/node" +ssh 10.5.0.38 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887" +ssh 10.5.0.4 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887" +ssh 10.5.0.9 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887" # [Handoff] +ssh 10.5.0.34 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887" # [Handoff] +ssh 10.5.0.15 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887" # [Handoff] +ssh 10.5.0.18 "ls -lah ${DEVICE:-/srv/node*}/loop0/objects/146/887/928c2f8006efeeb4b1164f4cce035887" # [Handoff] + +note: `/srv/node*` is used as default value of `devices`, the real value is set in the config file on each storage node. +""" + +STORAGE_TOPOLOGY = { + '10.5.0.18': { + 'app_name': 'swift-storage-region1-zone1', + 'unit': "swift-storage-region1-zone1/0", + 'region': 1, + 'zone': 1}, + '10.5.0.34': { + 'app_name': 'swift-storage-region1-zone2', + 'unit': "swift-storage-region1-zone2/0", + 'region': 1, + 'zone': 2}, + '10.5.0.4': { + 'app_name': 'swift-storage-region1-zone3', + 'unit': "swift-storage-region1-zone3/0", + 'region': 1, + 'zone': 3}, + '10.5.0.9': { + 'app_name': 'swift-storage-region2-zone1', + 'unit': "swift-storage-region2-zone1/0", + 'region': 2, + 'zone': 1}, + '10.5.0.15': { + 'app_name': 'swift-storage-region2-zone2', + 'unit': "swift-storage-region2-zone2/0", + 'region': 2, 'zone': 2}, + '10.5.0.38': { + 'app_name': 'swift-storage-region2-zone3', + 'unit': "swift-storage-region2-zone3/0", + 'region': 2, + 'zone': 3}} diff --git a/unit_tests/utilities/test_utilities.py b/unit_tests/utilities/test_utilities.py new file mode 100644 index 0000000..6af6089 --- /dev/null +++ b/unit_tests/utilities/test_utilities.py @@ -0,0 +1,188 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +import unit_tests.utils as ut_utils + +import zaza.openstack.utilities as utilities + + +class SomeException(Exception): + pass + + +class SomeException2(Exception): + pass + + +class SomeException3(Exception): + pass + + +class TestObjectRetrierWraps(ut_utils.BaseTestCase): + + def test_object_wrap(self): + + class A: + + def func(self, a, b=1): + return a + b + + a = A() + wrapped_a = utilities.ObjectRetrierWraps(a) + self.assertEqual(wrapped_a.func(3), 4) + + def test_object_multilevel_wrap(self): + + class A: + + def f1(self, a, b): + return a * b + + class B: + + @property + def f2(self): + + return A() + + b = B() + wrapped_b = utilities.ObjectRetrierWraps(b) + self.assertEqual(wrapped_b.f2.f1(5, 6), 30) + + def test_object_wrap_number(self): + + class A: + + class_a = 5 + + def __init__(self): + self.instance_a = 10 + + def f1(self, a, b): + return a * b + + a = A() + wrapped_a = utilities.ObjectRetrierWraps(a) + self.assertEqual(wrapped_a.class_a, 5) + self.assertEqual(wrapped_a.instance_a, 10) + + @mock.patch("time.sleep") + def test_object_wrap_exception(self, mock_sleep): + + class A: + + def func(self): + raise SomeException() + + a = A() + # retry on a specific exception + wrapped_a = utilities.ObjectRetrierWraps( + a, num_retries=1, retry_exceptions=[SomeException]) + with self.assertRaises(SomeException): + wrapped_a.func() + + mock_sleep.assert_called_once_with(5) + + # also retry on any exception if none specified + wrapped_a = utilities.ObjectRetrierWraps(a, num_retries=1) + mock_sleep.reset_mock() + with self.assertRaises(SomeException): + wrapped_a.func() + + mock_sleep.assert_called_once_with(5) + + # no retry if exception isn't listed. + wrapped_a = utilities.ObjectRetrierWraps( + a, num_retries=1, retry_exceptions=[SomeException2]) + mock_sleep.reset_mock() + with self.assertRaises(SomeException): + wrapped_a.func() + + mock_sleep.assert_not_called() + + @mock.patch("time.sleep") + def test_log_called(self, mock_sleep): + + class A: + + def func(self): + raise SomeException() + + a = A() + mock_log = mock.Mock() + wrapped_a = utilities.ObjectRetrierWraps( + a, num_retries=1, log=mock_log) + with self.assertRaises(SomeException): + wrapped_a.func() + + # there should be two calls; one for the single retry and one for the + # failure. + self.assertEqual(mock_log.call_count, 2) + + @mock.patch("time.sleep") + def test_back_off_maximum(self, mock_sleep): + + class A: + + def func(self): + raise SomeException() + + a = A() + wrapped_a = utilities.ObjectRetrierWraps(a, num_retries=3, backoff=2) + with self.assertRaises(SomeException): + wrapped_a.func() + # Note third call hits maximum wait time of 15. + mock_sleep.assert_has_calls([mock.call(5), + mock.call(10), + mock.call(15)]) + + @mock.patch("time.sleep") + def test_total_wait(self, mock_sleep): + + class A: + + def func(self): + raise SomeException() + + a = A() + wrapped_a = utilities.ObjectRetrierWraps( + a, num_retries=3, total_wait=9) + with self.assertRaises(SomeException): + wrapped_a.func() + # Note only two calls, as total wait is 9, so a 3rd retry would exceed + # that. + mock_sleep.assert_has_calls([mock.call(5), + mock.call(5)]) + + @mock.patch("time.sleep") + def test_retry_on_connect_failure(self, mock_sleep): + + class A: + + def func1(self): + raise SomeException() + + def func2(self): + raise utilities.ConnectFailure() + + a = A() + wrapped_a = utilities.retry_on_connect_failure(a, num_retries=2) + with self.assertRaises(SomeException): + wrapped_a.func1() + mock_sleep.assert_not_called() + with self.assertRaises(utilities.ConnectFailure): + wrapped_a.func2() + mock_sleep.assert_has_calls([mock.call(5)]) diff --git a/unit_tests/utilities/test_zaza_utilities_ceph.py b/unit_tests/utilities/test_zaza_utilities_ceph.py index 0855e41..e6ac337 100644 --- a/unit_tests/utilities/test_zaza_utilities_ceph.py +++ b/unit_tests/utilities/test_zaza_utilities_ceph.py @@ -116,3 +116,20 @@ class TestCephUtils(ut_utils.BaseTestCase): with self.assertRaises(model.CommandRunFailed): ceph_utils.get_rbd_hash('aunit', 'apool', 'aimage', model_name='amodel') + + def test_pools_from_broker_req(self): + self.patch_object(ceph_utils.juju_utils, 'get_relation_from_unit') + self.get_relation_from_unit.return_value = { + 'broker_req': ( + '{"api-version": 1, "ops": [' + '{"op": "create-pool", "name": "cinder-ceph", ' + '"compression-mode": null},' + '{"op": "create-pool", "name": "cinder-ceph", ' + '"compression-mode": "aggressive"}]}'), + } + self.assertEquals( + ceph_utils.get_pools_from_broker_req( + 'anApplication', 'aModelName'), + ['cinder-ceph']) + self.get_relation_from_unit.assert_called_once_with( + 'ceph-mon', 'anApplication', None, model_name='aModelName') diff --git a/unit_tests/utilities/test_zaza_utilities_generic.py b/unit_tests/utilities/test_zaza_utilities_generic.py index 4712d75..f1a648f 100644 --- a/unit_tests/utilities/test_zaza_utilities_generic.py +++ b/unit_tests/utilities/test_zaza_utilities_generic.py @@ -125,23 +125,30 @@ class TestGenericUtils(ut_utils.BaseTestCase): return _env.get(key) self.get.side_effect = _get_env - # OSCI backward compatible env vars + # Prefered OSCI TEST_ env vars _env = {"NET_ID": "netid", - "NAMESERVER": "10.0.0.10", + "NAME_SERVER": "10.0.0.10", "GATEWAY": "10.0.0.1", "CIDR_EXT": "10.0.0.0/24", - "FIP_RANGE": "10.0.200.0:10.0.200.254"} + "FIP_RANGE": "10.0.200.0:10.0.200.254", + "TEST_NET_ID": "test_netid", + "TEST_NAME_SERVER": "10.9.0.10", + "TEST_GATEWAY": "10.9.0.1", + "TEST_CIDR_EXT": "10.9.0.0/24", + "TEST_FIP_RANGE": "10.9.200.0:10.0.200.254"} _expected_result = {} - _expected_result["net_id"] = _env["NET_ID"] - _expected_result["external_dns"] = _env["NAMESERVER"] - _expected_result["default_gateway"] = _env["GATEWAY"] - _expected_result["external_net_cidr"] = _env["CIDR_EXT"] - _expected_result["start_floating_ip"] = _env["FIP_RANGE"].split(":")[0] - _expected_result["end_floating_ip"] = _env["FIP_RANGE"].split(":")[1] + _expected_result["net_id"] = _env["TEST_NET_ID"] + _expected_result["external_dns"] = _env["TEST_NAME_SERVER"] + _expected_result["default_gateway"] = _env["TEST_GATEWAY"] + _expected_result["external_net_cidr"] = _env["TEST_CIDR_EXT"] + _expected_result["start_floating_ip"] = _env[ + "TEST_FIP_RANGE"].split(":")[0] + _expected_result["end_floating_ip"] = _env[ + "TEST_FIP_RANGE"].split(":")[1] self.assertEqual(generic_utils.get_undercloud_env_vars(), _expected_result) - # Overriding configure.network named variables + # Overriding local configure.network named variables _override = {"start_floating_ip": "10.100.50.0", "end_floating_ip": "10.100.50.254", "default_gateway": "10.100.0.1", @@ -166,34 +173,6 @@ class TestGenericUtils(ut_utils.BaseTestCase): _yaml_dict) self._open.assert_called_once_with(_filename, "r") - def test_do_release_upgrade(self): - _unit = "app/2" - generic_utils.do_release_upgrade(_unit) - self.subprocess.check_call.assert_called_once_with( - ['juju', 'ssh', _unit, 'sudo', 'DEBIAN_FRONTEND=noninteractive', - 'do-release-upgrade', '-f', 'DistUpgradeViewNonInteractive']) - - def test_wrap_do_release_upgrade(self): - self.patch_object(generic_utils, "do_release_upgrade") - self.patch_object(generic_utils, "run_via_ssh") - self.patch_object(generic_utils.model, "scp_to_unit") - _unit = "app/2" - _from_series = "xenial" - _to_series = "bionic" - _workaround_script = "scriptname" - _files = ["filename", _workaround_script] - _scp_calls = [] - _run_calls = [ - mock.call(_unit, _workaround_script)] - for filename in _files: - _scp_calls.append(mock.call(_unit, filename, filename)) - generic_utils.wrap_do_release_upgrade( - _unit, to_series=_to_series, from_series=_from_series, - workaround_script=_workaround_script, files=_files) - self.scp_to_unit.assert_has_calls(_scp_calls) - self.run_via_ssh.assert_has_calls(_run_calls) - self.do_release_upgrade.assert_called_once_with(_unit) - def test_reboot(self): _unit = "app/2" generic_utils.reboot(_unit) @@ -219,146 +198,6 @@ class TestGenericUtils(ut_utils.BaseTestCase): self.set_application_config.assert_called_once_with( _application, {_origin: _pocket}) - def test_series_upgrade(self): - self.patch_object(generic_utils.model, "block_until_all_units_idle") - self.patch_object(generic_utils.model, "block_until_unit_wl_status") - self.patch_object(generic_utils.model, "prepare_series_upgrade") - self.patch_object(generic_utils.model, "complete_series_upgrade") - self.patch_object(generic_utils.model, "set_series") - self.patch_object(generic_utils, "set_origin") - self.patch_object(generic_utils, "wrap_do_release_upgrade") - self.patch_object(generic_utils, "reboot") - _unit = "app/2" - _application = "app" - _machine_num = "4" - _from_series = "xenial" - _to_series = "bionic" - _origin = "source" - _files = ["filename", "scriptname"] - _workaround_script = "scriptname" - generic_utils.series_upgrade( - _unit, _machine_num, origin=_origin, - to_series=_to_series, from_series=_from_series, - workaround_script=_workaround_script, files=_files) - self.block_until_all_units_idle.called_with() - self.prepare_series_upgrade.assert_called_once_with( - _machine_num, to_series=_to_series) - self.wrap_do_release_upgrade.assert_called_once_with( - _unit, to_series=_to_series, from_series=_from_series, - workaround_script=_workaround_script, files=_files) - self.complete_series_upgrade.assert_called_once_with(_machine_num) - self.set_series.assert_called_once_with(_application, _to_series) - self.set_origin.assert_called_once_with(_application, _origin) - self.reboot.assert_called_once_with(_unit) - - def test_series_upgrade_application_pause_peers_and_subordinates(self): - self.patch_object(generic_utils.model, "run_action") - self.patch_object(generic_utils, "series_upgrade") - _application = "app" - _from_series = "xenial" - _to_series = "bionic" - _origin = "source" - _files = ["filename", "scriptname"] - _workaround_script = "scriptname" - _completed_machines = [] - # Peers and Subordinates - _run_action_calls = [ - mock.call("{}-hacluster/1".format(_application), - "pause", action_params={}), - mock.call("{}/1".format(_application), "pause", action_params={}), - mock.call("{}-hacluster/2".format(_application), - "pause", action_params={}), - mock.call("{}/2".format(_application), "pause", action_params={}), - ] - _series_upgrade_calls = [] - for machine_num in ("0", "1", "2"): - _series_upgrade_calls.append( - mock.call("{}/{}".format(_application, machine_num), - machine_num, origin=_origin, - from_series=_from_series, to_series=_to_series, - workaround_script=_workaround_script, files=_files), - ) - - # Pause primary peers and subordinates - generic_utils.series_upgrade_application( - _application, origin=_origin, - to_series=_to_series, from_series=_from_series, - pause_non_leader_primary=True, - pause_non_leader_subordinate=True, - completed_machines=_completed_machines, - workaround_script=_workaround_script, files=_files), - self.run_action.assert_has_calls(_run_action_calls) - self.series_upgrade.assert_has_calls(_series_upgrade_calls) - - def test_series_upgrade_application_pause_subordinates(self): - self.patch_object(generic_utils.model, "run_action") - self.patch_object(generic_utils, "series_upgrade") - _application = "app" - _from_series = "xenial" - _to_series = "bionic" - _origin = "source" - _files = ["filename", "scriptname"] - _workaround_script = "scriptname" - _completed_machines = [] - # Subordinates only - _run_action_calls = [ - mock.call("{}-hacluster/1".format(_application), - "pause", action_params={}), - mock.call("{}-hacluster/2".format(_application), - "pause", action_params={}), - ] - _series_upgrade_calls = [] - - for machine_num in ("0", "1", "2"): - _series_upgrade_calls.append( - mock.call("{}/{}".format(_application, machine_num), - machine_num, origin=_origin, - from_series=_from_series, to_series=_to_series, - workaround_script=_workaround_script, files=_files), - ) - - # Pause subordinates - generic_utils.series_upgrade_application( - _application, origin=_origin, - to_series=_to_series, from_series=_from_series, - pause_non_leader_primary=False, - pause_non_leader_subordinate=True, - completed_machines=_completed_machines, - workaround_script=_workaround_script, files=_files), - self.run_action.assert_has_calls(_run_action_calls) - self.series_upgrade.assert_has_calls(_series_upgrade_calls) - - def test_series_upgrade_application_no_pause(self): - self.patch_object(generic_utils.model, "run_action") - self.patch_object(generic_utils, "series_upgrade") - _application = "app" - _from_series = "xenial" - _to_series = "bionic" - _origin = "source" - _series_upgrade_calls = [] - _files = ["filename", "scriptname"] - _workaround_script = "scriptname" - _completed_machines = [] - - for machine_num in ("0", "1", "2"): - _series_upgrade_calls.append( - mock.call("{}/{}".format(_application, machine_num), - machine_num, origin=_origin, - from_series=_from_series, to_series=_to_series, - workaround_script=_workaround_script, files=_files), - ) - - # No Pausiing - generic_utils.series_upgrade_application( - _application, origin=_origin, - to_series=_to_series, from_series=_from_series, - pause_non_leader_primary=False, - pause_non_leader_subordinate=False, - completed_machines=_completed_machines, - workaround_script=_workaround_script, files=_files) - self.run_action.assert_not_called() - self.series_upgrade.assert_has_calls(_series_upgrade_calls) - def test_set_dpkg_non_interactive_on_unit(self): self.patch_object(generic_utils, "model") _unit_name = "app/1" @@ -561,3 +400,126 @@ class TestGenericUtils(ut_utils.BaseTestCase): self.telnet.side_effect = generic_utils.socket.error self.assertFalse(generic_utils.is_port_open(_port, _addr)) + + def test_get_unit_hostnames(self): + self.patch( + "zaza.openstack.utilities.generic.model.run_on_unit", + new_callable=mock.MagicMock(), + name="_run" + ) + + _unit1 = mock.MagicMock() + _unit1.entity_id = "testunit/1" + _unit2 = mock.MagicMock() + _unit2.entity_id = "testunit/2" + + _hostname1 = "host1.domain" + _hostname2 = "host2.domain" + + expected = { + _unit1.entity_id: _hostname1, + _unit2.entity_id: _hostname2, + } + + _units = [_unit1, _unit2] + + self._run.side_effect = [{"Stdout": _hostname1}, + {"Stdout": _hostname2}] + + actual = generic_utils.get_unit_hostnames(_units) + + self.assertEqual(actual, expected) + expected_run_calls = [ + mock.call('testunit/1', 'hostname'), + mock.call('testunit/2', 'hostname')] + self._run.assert_has_calls(expected_run_calls) + + self._run.reset_mock() + self._run.side_effect = [{"Stdout": _hostname1}, + {"Stdout": _hostname2}] + expected_run_calls = [ + mock.call('testunit/1', 'hostname -f'), + mock.call('testunit/2', 'hostname -f')] + + actual = generic_utils.get_unit_hostnames(_units, fqdn=True) + self._run.assert_has_calls(expected_run_calls) + + def test_port_knock_units(self): + self.patch( + "zaza.openstack.utilities.generic.is_port_open", + new_callable=mock.MagicMock(), + name="_is_port_open" + ) + + _units = [ + mock.MagicMock(), + mock.MagicMock(), + ] + + self._is_port_open.side_effect = [True, True] + self.assertIsNone(generic_utils.port_knock_units(_units)) + self.assertEqual(self._is_port_open.call_count, len(_units)) + + self._is_port_open.side_effect = [True, False] + self.assertIsNotNone(generic_utils.port_knock_units(_units)) + + # check when func is expecting failure, i.e. should succeed + self._is_port_open.reset_mock() + self._is_port_open.side_effect = [False, False] + self.assertIsNone(generic_utils.port_knock_units(_units, + expect_success=False)) + self.assertEqual(self._is_port_open.call_count, len(_units)) + + def test_check_commands_on_units(self): + self.patch( + "zaza.openstack.utilities.generic.model.run_on_unit", + new_callable=mock.MagicMock(), + name="_run" + ) + + num_units = 2 + _units = [mock.MagicMock() for i in range(num_units)] + + num_cmds = 3 + cmds = ["/usr/bin/fakecmd"] * num_cmds + + # Test success, all calls return 0 + # zero is a string to replicate run_on_unit return data type + _cmd_results = [{"Code": "0"}] * len(_units) * len(cmds) + self._run.side_effect = _cmd_results + + result = generic_utils.check_commands_on_units(cmds, _units) + self.assertIsNone(result) + self.assertEqual(self._run.call_count, len(_units) * len(cmds)) + + # Test failure, some call returns 1 + _cmd_results[2] = {"Code": "1"} + self._run.side_effect = _cmd_results + + result = generic_utils.check_commands_on_units(cmds, _units) + self.assertIsNotNone(result) + + def test_systemctl(self): + self.patch_object(generic_utils.model, "get_unit_from_name") + self.patch_object(generic_utils.model, "run_on_unit") + _unit = mock.MagicMock() + _unit.entity_id = "unit/2" + _command = "stop" + _service = "servicename" + _systemctl = "/bin/systemctl {} {}".format(_command, _service) + self.run_on_unit.return_value = {"Code": 0} + self.get_unit_from_name.return_value = _unit + + # With Unit object + generic_utils.systemctl(_unit, _service, command=_command) + self.run_on_unit.assert_called_with(_unit.entity_id, _systemctl) + + # With string name unit + generic_utils.systemctl(_unit.entity_id, _service, command=_command) + self.run_on_unit.assert_called_with(_unit.entity_id, _systemctl) + + # Failed return code + self.run_on_unit.return_value = {"Code": 1} + with self.assertRaises(AssertionError): + generic_utils.systemctl( + _unit.entity_id, _service, command=_command) diff --git a/unit_tests/utilities/test_zaza_utilities_juju.py b/unit_tests/utilities/test_zaza_utilities_juju.py deleted file mode 100644 index e70bef8..0000000 --- a/unit_tests/utilities/test_zaza_utilities_juju.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright 2018 Canonical Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import mock -import unit_tests.utils as ut_utils -from zaza.openstack.utilities import juju as juju_utils - - -class TestJujuUtils(ut_utils.BaseTestCase): - - def setUp(self): - super(TestJujuUtils, self).setUp() - - # Juju Status Object and data - self.key = "instance-id" - self.key_data = "machine-uuid" - self.machine = "1" - self.machine_data = {self.key: self.key_data} - self.unit = "app/1" - self.unit_data = {"machine": self.machine} - self.application = "app" - self.application_data = {"units": {self.unit: self.unit_data}} - self.subordinate_application = "subordinate_application" - self.subordinate_application_data = { - "subordinate-to": [self.application]} - self.juju_status = mock.MagicMock() - self.juju_status.name = "juju_status_object" - self.juju_status.applications.get.return_value = self.application_data - self.juju_status.machines.get.return_value = self.machine_data - - # Model - self.patch_object(juju_utils, "model") - self.model_name = "model-name" - self.model.get_juju_model.return_value = self.model_name - self.model.get_status.return_value = self.juju_status - self.run_output = {"Code": "0", "Stderr": "", "Stdout": "RESULT"} - self.error_run_output = {"Code": "1", "Stderr": "ERROR", "Stdout": ""} - self.model.run_on_unit.return_value = self.run_output - - # Clouds - self.cloud_name = "FakeCloudName" - self.cloud_type = "FakeCloudType" - self.clouds = { - "clouds": - {self.cloud_name: - {"type": self.cloud_type}}} - - # Controller - self.patch_object(juju_utils, "controller") - self.controller.get_cloud.return_value = self.cloud_name - - def test_get_application_status(self): - self.patch_object(juju_utils, "get_full_juju_status") - self.get_full_juju_status.return_value = self.juju_status - - # Full status juju object return - self.assertEqual( - juju_utils.get_application_status(), self.juju_status) - self.get_full_juju_status.assert_called_once() - - # Application only dictionary return - self.assertEqual( - juju_utils.get_application_status(application=self.application), - self.application_data) - - # Unit no application dictionary return - self.assertEqual( - juju_utils.get_application_status(unit=self.unit), - self.unit_data) - - def test_get_cloud_configs(self): - self.patch_object(juju_utils.Path, "home") - self.patch_object(juju_utils.generic_utils, "get_yaml_config") - self.get_yaml_config.return_value = self.clouds - - # All the cloud configs - self.assertEqual(juju_utils.get_cloud_configs(), self.clouds) - - # With cloud specified - self.assertEqual(juju_utils.get_cloud_configs(self.cloud_name), - self.clouds["clouds"][self.cloud_name]) - - def test_get_full_juju_status(self): - self.assertEqual(juju_utils.get_full_juju_status(), self.juju_status) - self.model.get_status.assert_called_once_with() - - def test_get_machines_for_application(self): - self.patch_object(juju_utils, "get_application_status") - self.get_application_status.return_value = self.application_data - - # Machine data - self.assertEqual( - juju_utils.get_machines_for_application(self.application), - [self.machine]) - self.get_application_status.assert_called_once() - - # Subordinate application has no units - def _get_application_status(application): - _apps = { - self.application: self.application_data, - self.subordinate_application: - self.subordinate_application_data} - return _apps[application] - self.get_application_status.side_effect = _get_application_status - - self.assertEqual( - juju_utils.get_machines_for_application( - self.subordinate_application), - [self.machine]) - - def test_get_unit_name_from_host_name(self): - unit_mock1 = mock.MagicMock() - unit_mock1.data = {'machine-id': 12} - unit_mock1.entity_id = 'myapp/2' - unit_mock2 = mock.MagicMock() - unit_mock2.data = {'machine-id': 15} - unit_mock2.entity_id = 'myapp/5' - self.model.get_units.return_value = [unit_mock1, unit_mock2] - self.assertEqual( - juju_utils.get_unit_name_from_host_name('juju-model-12', 'myapp'), - 'myapp/2') - - def test_get_machine_status(self): - self.patch_object(juju_utils, "get_full_juju_status") - self.get_full_juju_status.return_value = self.juju_status - - # All machine data - self.assertEqual( - juju_utils.get_machine_status(self.machine), - self.machine_data) - self.get_full_juju_status.assert_called_once() - - # Request a specific key - self.assertEqual( - juju_utils.get_machine_status(self.machine, self.key), - self.key_data) - - def test_get_machine_uuids_for_application(self): - self.patch_object(juju_utils, "get_machines_for_application") - self.get_machines_for_application.return_value = [self.machine] - - self.assertEqual( - juju_utils.get_machine_uuids_for_application(self.application), - [self.machine_data.get("instance-id")]) - self.get_machines_for_application.assert_called_once_with( - self.application) - - def test_get_provider_type(self): - self.patch_object(juju_utils, "get_cloud_configs") - self.get_cloud_configs.return_value = {"type": self.cloud_type} - self.assertEqual(juju_utils.get_provider_type(), - self.cloud_type) - self.get_cloud_configs.assert_called_once_with(self.cloud_name) - - def test_remote_run(self): - _cmd = "do the thing" - - # Success - self.assertEqual(juju_utils.remote_run(self.unit, _cmd), - self.run_output["Stdout"]) - self.model.run_on_unit.assert_called_once_with( - self.unit, _cmd, timeout=None) - - # Non-fatal failure - self.model.run_on_unit.return_value = self.error_run_output - self.assertEqual(juju_utils.remote_run(self.unit, _cmd, fatal=False), - self.error_run_output["Stderr"]) - - # Fatal failure - with self.assertRaises(Exception): - juju_utils.remote_run(self.unit, _cmd, fatal=True) - - def test_get_unit_names(self): - self.patch('zaza.model.get_first_unit_name', new_callable=mock.Mock(), - name='_get_first_unit_name') - juju_utils._get_unit_names(['aunit/0', 'otherunit/0']) - self.assertFalse(self._get_first_unit_name.called) - - def test_get_unit_names_called_with_application_name(self): - self.patch_object(juju_utils, 'model') - juju_utils._get_unit_names(['aunit', 'otherunit/0']) - self.model.get_first_unit_name.assert_called() - - def test_get_relation_from_unit(self): - self.patch_object(juju_utils, '_get_unit_names') - self.patch_object(juju_utils, 'yaml') - self.patch_object(juju_utils, 'model') - self._get_unit_names.return_value = ['aunit/0', 'otherunit/0'] - data = {'foo': 'bar'} - self.model.get_relation_id.return_value = 42 - self.model.run_on_unit.return_value = {'Code': 0, 'Stdout': str(data)} - juju_utils.get_relation_from_unit('aunit/0', 'otherunit/0', - 'arelation') - self.model.run_on_unit.assert_called_with( - 'aunit/0', - 'relation-get --format=yaml -r "42" - "otherunit/0"') - self.yaml.safe_load.assert_called_with(str(data)) - - def test_get_relation_from_unit_fails(self): - self.patch_object(juju_utils, '_get_unit_names') - self.patch_object(juju_utils, 'yaml') - self.patch_object(juju_utils, 'model') - self._get_unit_names.return_value = ['aunit/0', 'otherunit/0'] - self.model.get_relation_id.return_value = 42 - self.model.run_on_unit.return_value = {'Code': 1, 'Stderr': 'ERROR'} - with self.assertRaises(Exception): - juju_utils.get_relation_from_unit('aunit/0', 'otherunit/0', - 'arelation') - self.model.run_on_unit.assert_called_with( - 'aunit/0', - 'relation-get --format=yaml -r "42" - "otherunit/0"') - self.assertFalse(self.yaml.safe_load.called) - - def test_leader_get(self): - self.patch_object(juju_utils, 'yaml') - self.patch_object(juju_utils, 'model') - data = {'foo': 'bar'} - self.model.run_on_leader.return_value = { - 'Code': 0, 'Stdout': str(data)} - juju_utils.leader_get('application') - self.model.run_on_leader.assert_called_with( - 'application', 'leader-get --format=yaml ') - self.yaml.safe_load.assert_called_with(str(data)) - - def test_leader_get_key(self): - self.patch_object(juju_utils, 'yaml') - self.patch_object(juju_utils, 'model') - data = {'foo': 'bar'} - self.model.run_on_leader.return_value = { - 'Code': 0, 'Stdout': data['foo']} - juju_utils.leader_get('application', 'foo') - self.model.run_on_leader.assert_called_with( - 'application', 'leader-get --format=yaml foo') - self.yaml.safe_load.assert_called_with(data['foo']) - - def test_leader_get_fails(self): - self.patch_object(juju_utils, 'yaml') - self.patch_object(juju_utils, 'model') - self.model.run_on_leader.return_value = { - 'Code': 1, 'Stderr': 'ERROR'} - with self.assertRaises(Exception): - juju_utils.leader_get('application') - self.model.run_on_leader.assert_called_with( - 'application', 'leader-get --format=yaml ') - self.assertFalse(self.yaml.safe_load.called) - - def test_get_machine_series(self): - self.patch( - 'zaza.openstack.utilities.juju.get_machine_status', - new_callable=mock.MagicMock(), - name='_get_machine_status' - ) - self._get_machine_status.return_value = 'xenial' - expected = 'xenial' - actual = juju_utils.get_machine_series('6') - self._get_machine_status.assert_called_with( - machine='6', - key='series' - ) - self.assertEqual(expected, actual) diff --git a/unit_tests/utilities/test_zaza_utilities_openstack.py b/unit_tests/utilities/test_zaza_utilities_openstack.py index 9aa3cde..0434a2b 100644 --- a/unit_tests/utilities/test_zaza_utilities_openstack.py +++ b/unit_tests/utilities/test_zaza_utilities_openstack.py @@ -16,6 +16,9 @@ import copy import datetime import io import mock +import subprocess +import sys +import unittest import tenacity import unit_tests.utils as ut_utils @@ -159,7 +162,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): # Already exists network = openstack_utils.create_external_network( - self.neutronclient, self.project_id, False) + self.neutronclient, self.project_id) self.assertEqual(network, self.network["network"]) self.neutronclient.create_network.assert_not_called() @@ -169,7 +172,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): network_msg = copy.deepcopy(self.network) network_msg["network"].pop("id") network = openstack_utils.create_external_network( - self.neutronclient, self.project_id, False) + self.neutronclient, self.project_id) self.assertEqual(network, self.network["network"]) self.neutronclient.create_network.assert_called_once_with( network_msg) @@ -190,6 +193,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): self.patch_object(openstack_utils, 'get_application_config_option') self.patch_object(openstack_utils, 'get_keystone_ip') self.patch_object(openstack_utils, "get_current_os_versions") + self.patch_object(openstack_utils, "get_remote_ca_cert_file") self.patch_object(openstack_utils.juju_utils, 'leader_get') if tls_relation: self.patch_object(openstack_utils.model, "scp_from_unit") @@ -203,6 +207,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): self.get_relation_id.return_value = None self.get_application_config_option.return_value = None self.leader_get.return_value = 'openstack' + self.get_remote_ca_cert_file.return_value = None if tls_relation or ssl_cert: port = 35357 transport = 'https' @@ -244,7 +249,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): 'API_VERSION': 3, } if tls_relation: - expect['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT + self.get_remote_ca_cert_file.return_value = '/tmp/a.cert' + expect['OS_CACERT'] = '/tmp/a.cert' self.assertEqual(openstack_utils.get_overcloud_auth(), expect) @@ -288,12 +294,22 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): openstack_utils.get_undercloud_keystone_session() self.get_keystone_session.assert_called_once_with(_auth, verify=None) + def test_get_nova_session_client(self): + session_mock = mock.MagicMock() + self.patch_object(openstack_utils.novaclient_client, "Client") + openstack_utils.get_nova_session_client(session_mock) + self.Client.assert_called_once_with(2, session=session_mock) + self.Client.reset_mock() + openstack_utils.get_nova_session_client(session_mock, version=2.56) + self.Client.assert_called_once_with(2.56, session=session_mock) + def test_get_urllib_opener(self): self.patch_object(openstack_utils.urllib.request, "ProxyHandler") self.patch_object(openstack_utils.urllib.request, "HTTPHandler") self.patch_object(openstack_utils.urllib.request, "build_opener") - self.patch_object(openstack_utils.os, "getenv") - self.getenv.return_value = None + self.patch_object(openstack_utils.deployment_env, + "get_deployment_context", + return_value=dict(TEST_HTTP_PROXY=None)) HTTPHandler_mock = mock.MagicMock() self.HTTPHandler.return_value = HTTPHandler_mock openstack_utils.get_urllib_opener() @@ -304,8 +320,9 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): self.patch_object(openstack_utils.urllib.request, "ProxyHandler") self.patch_object(openstack_utils.urllib.request, "HTTPHandler") self.patch_object(openstack_utils.urllib.request, "build_opener") - self.patch_object(openstack_utils.os, "getenv") - self.getenv.return_value = 'http://squidy' + self.patch_object(openstack_utils.deployment_env, + "get_deployment_context", + return_value=dict(TEST_HTTP_PROXY='http://squidy')) ProxyHandler_mock = mock.MagicMock() self.ProxyHandler.return_value = ProxyHandler_mock openstack_utils.get_urllib_opener() @@ -366,12 +383,15 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): 'e01df65a') def test__resource_reaches_status_bespoke(self): + client_mock = mock.MagicMock() resource_mock = mock.MagicMock() - resource_mock.get.return_value = mock.MagicMock(status='readyish') + resource_mock.special_status = 'readyish' + client_mock.get.return_value = resource_mock openstack_utils._resource_reaches_status( - resource_mock, + client_mock, 'e01df65a', - 'readyish') + 'readyish', + resource_attribute='special_status') def test__resource_reaches_status_bespoke_fail(self): resource_mock = mock.MagicMock() @@ -501,7 +521,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): glance_mock.images.upload.assert_called_once_with( '9d1125af', f(), - ) + backend=None) self.resource_reaches_status.assert_called_once_with( glance_mock.images, '9d1125af', @@ -526,7 +546,12 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): self.upload_image_to_glance.assert_called_once_with( glance_mock, 'wibbly/c.img', - 'bob') + 'bob', + backend=None, + disk_format='qcow2', + visibility='public', + container_format='bare', + force_import=False) def test_create_image_pass_directory(self): glance_mock = mock.MagicMock() @@ -546,7 +571,12 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): self.upload_image_to_glance.assert_called_once_with( glance_mock, 'tests/c.img', - 'bob') + 'bob', + backend=None, + disk_format='qcow2', + visibility='public', + container_format='bare', + force_import=False) self.gettempdir.assert_not_called() def test_create_ssh_key(self): @@ -578,21 +608,27 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): nova_mock.keypairs.create.assert_called_once_with(name='mykeys') def test_get_private_key_file(self): + self.patch_object(openstack_utils.deployment_env, 'get_tmpdir', + return_value='/tmp/zaza-model1') self.assertEqual( openstack_utils.get_private_key_file('mykeys'), - 'tests/id_rsa_mykeys') + '/tmp/zaza-model1/id_rsa_mykeys') def test_write_private_key(self): + self.patch_object(openstack_utils.deployment_env, 'get_tmpdir', + return_value='/tmp/zaza-model1') m = mock.mock_open() with mock.patch( 'zaza.openstack.utilities.openstack.open', m, create=False ): openstack_utils.write_private_key('mykeys', 'keycontents') - m.assert_called_once_with('tests/id_rsa_mykeys', 'w') + m.assert_called_once_with('/tmp/zaza-model1/id_rsa_mykeys', 'w') handle = m() handle.write.assert_called_once_with('keycontents') def test_get_private_key(self): + self.patch_object(openstack_utils.deployment_env, 'get_tmpdir', + return_value='/tmp/zaza-model1') self.patch_object(openstack_utils.os.path, "isfile", return_value=True) m = mock.mock_open(read_data='myprivkey') @@ -604,6 +640,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): 'myprivkey') def test_get_private_key_file_missing(self): + self.patch_object(openstack_utils.deployment_env, 'get_tmpdir', + return_value='/tmp/zaza-model1') self.patch_object(openstack_utils.os.path, "isfile", return_value=False) self.assertIsNone(openstack_utils.get_private_key('mykeys')) @@ -664,17 +702,19 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): []) def test_ping_response(self): - self.patch_object(openstack_utils.subprocess, 'check_call') + self.patch_object(openstack_utils.subprocess, 'run') openstack_utils.ping_response('10.0.0.10') - self.check_call.assert_called_once_with( - ['ping', '-c', '1', '-W', '1', '10.0.0.10'], stdout=-3) + self.run.assert_called_once_with( + ['ping', '-c', '1', '-W', '1', '10.0.0.10'], check=True, + stdout=mock.ANY, stderr=mock.ANY) def test_ping_response_fail(self): openstack_utils.ping_response.retry.wait = \ tenacity.wait_none() - self.patch_object(openstack_utils.subprocess, 'check_call') - self.check_call.side_effect = Exception() - with self.assertRaises(Exception): + self.patch_object(openstack_utils.subprocess, 'run') + self.run.side_effect = subprocess.CalledProcessError(returncode=42, + cmd='mycmd') + with self.assertRaises(subprocess.CalledProcessError): openstack_utils.ping_response('10.0.0.10') def test_ssh_test(self): @@ -735,7 +775,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): 'bob', '10.0.0.10', 'myvm', - password='reallyhardpassord') + password='reallyhardpassord', + retry=False) paramiko_mock.connect.assert_called_once_with( '10.0.0.10', password='reallyhardpassord', @@ -759,7 +800,7 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): privkey='myprivkey') paramiko_mock.connect.assert_called_once_with( '10.0.0.10', - password='', + password=None, pkey='akey', username='bob') @@ -803,23 +844,26 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): name='_get_os_version' ) self.patch( - 'zaza.openstack.utilities.juju.get_machines_for_application', + 'zaza.utilities.juju.get_machines_for_application', new_callable=mock.MagicMock(), name='_get_machines' ) self.patch( - 'zaza.openstack.utilities.juju.get_machine_series', + 'zaza.utilities.juju.get_machine_series', new_callable=mock.MagicMock(), name='_get_machine_series' ) + _machine = mock.MagicMock() + # No machine returned self._get_machines.return_value = [] with self.assertRaises(exceptions.ApplicationNotFound): openstack_utils.get_current_os_release_pair() + self._get_machines.side_effect = None # No series returned - self._get_machines.return_value = ['6'] + self._get_machines.return_value = [_machine] self._get_machine_series.return_value = None with self.assertRaises(exceptions.SeriesNotFound): openstack_utils.get_current_os_release_pair() @@ -842,7 +886,24 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): result = openstack_utils.get_current_os_release_pair() self.assertEqual(expected, result) - def test_get_openstack_release(self): + def test_get_current_os_versions(self): + self.patch_object(openstack_utils, "get_openstack_release") + self.patch_object(openstack_utils.generic_utils, "get_pkg_version") + + # Pre-Wallaby scenario where openstack-release package isn't installed + self.get_openstack_release.return_value = None + self.get_pkg_version.return_value = '18.0.0' + expected = {'keystone': 'victoria'} + result = openstack_utils.get_current_os_versions('keystone') + self.assertEqual(expected, result) + + # Wallaby+ scenario where openstack-release package is installed + self.get_openstack_release.return_value = 'wallaby' + expected = {'keystone': 'wallaby'} + result = openstack_utils.get_current_os_versions('keystone') + self.assertEqual(expected, result) + + def test_get_os_release(self): self.patch( 'zaza.openstack.utilities.openstack.get_current_os_release_pair', new_callable=mock.MagicMock(), @@ -871,6 +932,14 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): release_comp = xenial_queens > xenial_mitaka self.assertTrue(release_comp) + # Check specifying an application + self._get_os_rel_pair.reset_mock() + self._get_os_rel_pair.return_value = 'xenial_mitaka' + expected = 4 + result = openstack_utils.get_os_release(application='myapp') + self.assertEqual(expected, result) + self._get_os_rel_pair.assert_called_once_with(application='myapp') + def test_get_keystone_api_version(self): self.patch_object(openstack_utils, "get_current_os_versions") self.patch_object(openstack_utils, "get_application_config_option") @@ -886,6 +955,23 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): self.get_application_config_option.return_value = None self.assertEqual(openstack_utils.get_keystone_api_version(), 3) + def test_get_openstack_release(self): + self.patch_object(openstack_utils.model, "get_units") + self.patch_object(openstack_utils.juju_utils, "remote_run") + + # Test pre-Wallaby behavior where openstack-release pkg isn't installed + self.get_units.return_value = [] + self.remote_run.return_value = "OPENSTACK_CODENAME=wallaby " + + # Test Wallaby+ behavior where openstack-release package is installed + unit1 = mock.MagicMock() + unit1.entity_id = 1 + self.get_units.return_value = [unit1] + self.remote_run.return_value = "OPENSTACK_CODENAME=wallaby " + + result = openstack_utils.get_openstack_release("application", "model") + self.assertEqual(result, "wallaby") + def test_get_project_id(self): # No domain self.patch_object(openstack_utils, "get_keystone_api_version") @@ -1106,7 +1192,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): self.get_relation_from_unit.assert_called_once_with( 'swift-proxy', 'keystone', - 'identity-service') + 'identity-service', + model_name=None) self.get_keystone_session.assert_called_once_with( { 'OS_AUTH_URL': 'http://10.5.0.61:5000/v3', @@ -1153,7 +1240,8 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): self.get_relation_from_unit.assert_called_once_with( 'swift-proxy', 'keystone', - 'identity-service') + 'identity-service', + model_name=None) self.get_keystone_session.assert_called_once_with( { 'OS_AUTH_URL': 'http://10.5.0.36:5000/v2.0', @@ -1165,3 +1253,280 @@ class TestOpenStackUtils(ut_utils.BaseTestCase): 'OS_PROJECT_NAME': 'services'}, scope='PROJECT', verify=None) + + def test_get_gateway_uuids(self): + self.patch_object(openstack_utils.juju_utils, + 'get_machine_uuids_for_application') + self.get_machine_uuids_for_application.return_value = 'ret' + self.assertEquals(openstack_utils.get_gateway_uuids(), 'ret') + self.get_machine_uuids_for_application.assert_called_once_with( + 'neutron-gateway') + + def test_get_ovs_uuids(self): + self.patch_object(openstack_utils.juju_utils, + 'get_machine_uuids_for_application') + self.get_machine_uuids_for_application.return_value = 'ret' + self.assertEquals(openstack_utils.get_ovs_uuids(), 'ret') + self.get_machine_uuids_for_application.assert_called_once_with( + 'neutron-openvswitch') + + def test_get_ovn_uuids(self): + self.patch_object(openstack_utils.juju_utils, + 'get_machine_uuids_for_application') + self.get_machine_uuids_for_application.return_value = ['ret'] + self.assertEquals(list(openstack_utils.get_ovn_uuids()), + ['ret', 'ret']) + self.get_machine_uuids_for_application.assert_has_calls([ + mock.call('ovn-chassis'), + mock.call('ovn-dedicated-chassis'), + ]) + + def test_dvr_enabled(self): + self.patch_object(openstack_utils, 'get_application_config_option') + openstack_utils.dvr_enabled() + self.get_application_config_option.assert_called_once_with( + 'neutron-api', 'enable-dvr') + + def test_ovn_present(self): + self.patch_object(openstack_utils.model, 'get_application') + self.get_application.side_effect = [None, KeyError] + self.assertTrue(openstack_utils.ovn_present()) + self.get_application.side_effect = [KeyError, None] + self.assertTrue(openstack_utils.ovn_present()) + self.get_application.side_effect = [KeyError, KeyError] + self.assertFalse(openstack_utils.ovn_present()) + + def test_ngw_present(self): + self.patch_object(openstack_utils.model, 'get_application') + self.get_application.side_effect = None + self.assertTrue(openstack_utils.ngw_present()) + self.get_application.side_effect = KeyError + self.assertFalse(openstack_utils.ngw_present()) + + def test_get_charm_networking_data(self): + self.patch_object(openstack_utils, 'deprecated_external_networking') + self.patch_object(openstack_utils, 'dvr_enabled') + self.patch_object(openstack_utils, 'ovn_present') + self.patch_object(openstack_utils, 'ngw_present') + self.patch_object(openstack_utils, 'get_ovs_uuids') + self.patch_object(openstack_utils, 'get_gateway_uuids') + self.patch_object(openstack_utils, 'get_ovn_uuids') + self.patch_object(openstack_utils.model, 'get_application') + self.dvr_enabled.return_value = False + self.ovn_present.return_value = False + self.ngw_present.return_value = False + self.get_ovs_uuids.return_value = [] + self.get_gateway_uuids.return_value = [] + self.get_ovn_uuids.return_value = [] + self.get_application.side_effect = KeyError + + with self.assertRaises(RuntimeError): + openstack_utils.get_charm_networking_data() + self.ngw_present.return_value = True + self.assertEquals( + openstack_utils.get_charm_networking_data(), + openstack_utils.CharmedOpenStackNetworkingData( + openstack_utils.OpenStackNetworkingTopology.ML2_OVS, + ['neutron-gateway'], + mock.ANY, + 'data-port', + {})) + self.dvr_enabled.return_value = True + self.assertEquals( + openstack_utils.get_charm_networking_data(), + openstack_utils.CharmedOpenStackNetworkingData( + openstack_utils.OpenStackNetworkingTopology.ML2_OVS_DVR, + ['neutron-gateway', 'neutron-openvswitch'], + mock.ANY, + 'data-port', + {})) + self.ngw_present.return_value = False + self.assertEquals( + openstack_utils.get_charm_networking_data(), + openstack_utils.CharmedOpenStackNetworkingData( + openstack_utils.OpenStackNetworkingTopology.ML2_OVS_DVR_SNAT, + ['neutron-openvswitch'], + mock.ANY, + 'data-port', + {})) + self.dvr_enabled.return_value = False + self.ovn_present.return_value = True + self.assertEquals( + openstack_utils.get_charm_networking_data(), + openstack_utils.CharmedOpenStackNetworkingData( + openstack_utils.OpenStackNetworkingTopology.ML2_OVN, + ['ovn-chassis'], + mock.ANY, + 'bridge-interface-mappings', + {'ovn-bridge-mappings': 'physnet1:br-ex'})) + self.get_application.side_effect = None + self.assertEquals( + openstack_utils.get_charm_networking_data(), + openstack_utils.CharmedOpenStackNetworkingData( + openstack_utils.OpenStackNetworkingTopology.ML2_OVN, + ['ovn-chassis', 'ovn-dedicated-chassis'], + mock.ANY, + 'bridge-interface-mappings', + {'ovn-bridge-mappings': 'physnet1:br-ex'})) + + def test_get_cacert_absolute_path(self): + self.patch_object(openstack_utils.deployment_env, 'get_tmpdir') + self.get_tmpdir.return_value = '/tmp/default' + self.assertEqual( + openstack_utils.get_cacert_absolute_path('filename'), + '/tmp/default/filename') + + def test_get_cacert(self): + self.patch_object(openstack_utils.deployment_env, 'get_tmpdir') + self.get_tmpdir.return_value = '/tmp/default' + self.patch_object(openstack_utils.os.path, 'exists') + results = { + '/tmp/default/vault_juju_ca_cert.crt': True} + self.exists.side_effect = lambda x: results[x] + self.assertEqual( + openstack_utils.get_cacert(), + '/tmp/default/vault_juju_ca_cert.crt') + + results = { + '/tmp/default/vault_juju_ca_cert.crt': False, + '/tmp/default/keystone_juju_ca_cert.crt': True} + self.assertEqual( + openstack_utils.get_cacert(), + '/tmp/default/keystone_juju_ca_cert.crt') + + results = { + '/tmp/default/vault_juju_ca_cert.crt': False, + '/tmp/default/keystone_juju_ca_cert.crt': False} + self.assertIsNone(openstack_utils.get_cacert()) + + def test_get_remote_ca_cert_file(self): + self.patch_object(openstack_utils.model, 'get_first_unit_name') + self.patch_object( + openstack_utils, + '_get_remote_ca_cert_file_candidates') + self.patch_object(openstack_utils.model, 'scp_from_unit') + self.patch_object(openstack_utils.os.path, 'exists') + self.patch_object(openstack_utils.shutil, 'move') + self.patch_object(openstack_utils.os, 'chmod') + self.patch_object(openstack_utils.tempfile, 'NamedTemporaryFile') + self.patch_object(openstack_utils.deployment_env, 'get_tmpdir') + self.get_tmpdir.return_value = '/tmp/default' + enter_mock = mock.MagicMock() + enter_mock.__enter__.return_value.name = 'tempfilename' + self.NamedTemporaryFile.return_value = enter_mock + self.get_first_unit_name.return_value = 'neutron-api/0' + self._get_remote_ca_cert_file_candidates.return_value = [ + '/tmp/ca1.cert'] + self.exists.return_value = True + + openstack_utils.get_remote_ca_cert_file('neutron-api') + self.scp_from_unit.assert_called_once_with( + 'neutron-api/0', + '/tmp/ca1.cert', + 'tempfilename') + self.chmod.assert_called_once_with('/tmp/default/ca1.cert', 0o644) + self.move.assert_called_once_with( + 'tempfilename', '/tmp/default/ca1.cert') + + +class TestAsyncOpenstackUtils(ut_utils.AioTestCase): + + def setUp(self): + super(TestAsyncOpenstackUtils, self).setUp() + if sys.version_info < (3, 6, 0): + raise unittest.SkipTest("Can't AsyncMock in py35") + model_mock = mock.MagicMock() + test_mock = mock.MagicMock() + + class AsyncContextManagerMock(test_mock): + async def __aenter__(self): + return self + + async def __aexit__(self, *args): + pass + + self.model_mock = model_mock + self.patch_object(openstack_utils.zaza.model, "async_block_until") + + async def _block_until(f, timeout): + # Store the result of the call to _check_ca_present to validate + # tests + self.result = await f() + self.async_block_until.side_effect = _block_until + self.patch('zaza.model.run_in_model', name='_run_in_model') + self._run_in_model.return_value = AsyncContextManagerMock + self._run_in_model().__aenter__.return_value = self.model_mock + + async def test_async_block_until_ca_exists(self): + def _get_action_output(stdout, code, stderr=None): + stderr = stderr or '' + action = mock.MagicMock() + action.data = { + 'results': { + 'Code': code, + 'Stderr': stderr, + 'Stdout': stdout}} + return action + results = { + '/tmp/missing.cert': _get_action_output( + '', + '1', + 'cat: /tmp/missing.cert: No such file or directory'), + '/tmp/good.cert': _get_action_output('CERTIFICATE', '0')} + + async def _run(command, timeout=None): + return results[command.split()[-1]] + self.unit1 = mock.MagicMock() + self.unit2 = mock.MagicMock() + self.unit2.run.side_effect = _run + self.unit1.run.side_effect = _run + self.units = [self.unit1, self.unit2] + _units = mock.MagicMock() + _units.units = self.units + self.model_mock.applications = { + 'keystone': _units + } + self.patch_object( + openstack_utils, + "_async_get_remote_ca_cert_file_candidates") + + # Test a missing cert then a good cert. + self._async_get_remote_ca_cert_file_candidates.return_value = [ + '/tmp/missing.cert', + '/tmp/good.cert'] + await openstack_utils.async_block_until_ca_exists( + 'keystone', + 'CERTIFICATE') + self.assertTrue(self.result) + + # Test a single missing + self._async_get_remote_ca_cert_file_candidates.return_value = [ + '/tmp/missing.cert'] + await openstack_utils.async_block_until_ca_exists( + 'keystone', + 'CERTIFICATE') + self.assertFalse(self.result) + + async def test__async_get_remote_ca_cert_file_candidates(self): + self.patch_object(openstack_utils.zaza.model, "async_get_relation_id") + rel_id_out = { + } + + def _get_relation_id(app, cert_app, model_name, remote_interface_name): + return rel_id_out[cert_app] + self.async_get_relation_id.side_effect = _get_relation_id + + rel_id_out['vault'] = 'certs:1' + r = await openstack_utils._async_get_remote_ca_cert_file_candidates( + 'neutron-api', 'mymodel') + self.assertEqual( + r, + ['/usr/local/share/ca-certificates/vault_juju_ca_cert.crt', + '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt']) + + rel_id_out['vault'] = None + r = await openstack_utils._async_get_remote_ca_cert_file_candidates( + 'neutron-api', 'mymodel') + self.assertEqual( + r, + ['/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt']) diff --git a/unit_tests/utilities/test_zaza_utilities_openstack_upgrade.py b/unit_tests/utilities/test_zaza_utilities_openstack_upgrade.py new file mode 100644 index 0000000..3782926 --- /dev/null +++ b/unit_tests/utilities/test_zaza_utilities_openstack_upgrade.py @@ -0,0 +1,288 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock + +import unit_tests.utils as ut_utils +import zaza.openstack.utilities.openstack_upgrade as openstack_upgrade + + +class TestOpenStackUpgradeUtils(ut_utils.BaseTestCase): + + async def _arun_action_on_units(self, units, cmd, model_name=None, + raise_on_failure=True): + pass + + def setUp(self): + super(TestOpenStackUpgradeUtils, self).setUp() + self.patch_object( + openstack_upgrade.zaza.model, + "async_run_action_on_units") + self.async_run_action_on_units.side_effect = self._arun_action_on_units + self.patch_object( + openstack_upgrade.zaza.model, + "get_units") + self.juju_status = mock.MagicMock() + self.patch_object( + openstack_upgrade.zaza.model, + "get_status", + return_value=self.juju_status) + self.patch_object( + openstack_upgrade.zaza.model, + "set_application_config") + self.patch_object( + openstack_upgrade.zaza.model, + "get_application_config") + self.patch_object( + openstack_upgrade.zaza.model, + "block_until_all_units_idle") + self.patch_object( + openstack_upgrade, + "block_until_mysql_innodb_cluster_has_rw") + + def _get_application_config(app, model_name=None): + app_config = { + 'ceph-mon': {'verbose': {'value': True}, + 'source': {'value': 'old-src'}}, + 'neutron-openvswitch': {'verbose': {'value': True}}, + 'ntp': {'verbose': {'value': True}}, + 'percona-cluster': {'verbose': {'value': True}, + 'source': {'value': 'old-src'}}, + 'cinder': { + 'verbose': {'value': True}, + 'openstack-origin': {'value': 'old-src'}, + 'action-managed-upgrade': {'value': False}}, + 'neutron-api': { + 'verbose': {'value': True}, + 'openstack-origin': {'value': 'old-src'}, + 'action-managed-upgrade': {'value': False}}, + 'nova-compute': { + 'verbose': {'value': True}, + 'openstack-origin': {'value': 'old-src'}, + 'action-managed-upgrade': {'value': False}}, + 'mysql-innodb-cluster': { + 'verbose': {'value': True}, + 'source': {'value': 'old-src'}, + 'action-managed-upgrade': {'value': True}}, + } + return app_config[app] + self.get_application_config.side_effect = _get_application_config + self.juju_status.applications = { + 'mydb': { # Filter as it is on UPGRADE_EXCLUDE_LIST + 'charm': 'cs:percona-cluster'}, + 'neutron-openvswitch': { # Filter as it is a subordinates + 'charm': 'cs:neutron-openvswitch', + 'subordinate-to': 'nova-compute'}, + 'ntp': { # Filter as it has no source option + 'charm': 'cs:ntp'}, + 'mysql-innodb-cluster': { + 'charm': 'cs:mysql-innodb-cluster', + 'units': { + 'mysql-innodb-cluster/0': {}}}, + 'nova-compute': { + 'charm': 'cs:nova-compute', + 'units': { + 'nova-compute/0': { + 'subordinates': { + 'neutron-openvswitch/2': { + 'charm': 'cs:neutron-openvswitch-22'}}}}}, + 'cinder': { + 'charm': 'cs:cinder-23', + 'units': { + 'cinder/1': { + 'subordinates': { + 'cinder-hacluster/0': { + 'charm': 'cs:hacluster-42'}, + 'cinder-ceph/3': { + 'charm': 'cs:cinder-ceph-2'}}}}}} + + def test_pause_units(self): + openstack_upgrade.pause_units(['cinder/1', 'glance/2']) + self.async_run_action_on_units.assert_called_once_with( + ['cinder/1', 'glance/2'], + 'pause', + model_name=None, + raise_on_failure=True) + + def test_resume_units(self): + openstack_upgrade.resume_units(['cinder/1', 'glance/2']) + self.async_run_action_on_units.assert_called_once_with( + ['cinder/1', 'glance/2'], + 'resume', + model_name=None, + raise_on_failure=True) + + def test_action_unit_upgrade(self): + openstack_upgrade.action_unit_upgrade(['cinder/1', 'glance/2']) + self.async_run_action_on_units.assert_called_once_with( + ['cinder/1', 'glance/2'], + 'openstack-upgrade', + model_name=None, + raise_on_failure=True) + + def test_action_upgrade_apps(self): + self.patch_object(openstack_upgrade, "pause_units") + self.patch_object(openstack_upgrade, "action_unit_upgrade") + self.patch_object(openstack_upgrade, "resume_units") + mock_nova_compute_0 = mock.MagicMock() + mock_nova_compute_0.entity_id = 'nova-compute/0' + mock_cinder_1 = mock.MagicMock() + mock_cinder_1.entity_id = 'cinder/1' + units = { + 'nova-compute': [mock_nova_compute_0], + 'cinder': [mock_cinder_1]} + self.get_units.side_effect = lambda app, model_name: units[app] + openstack_upgrade.action_upgrade_apps(['nova-compute', 'cinder']) + pause_calls = [ + mock.call(['cinder-hacluster/0'], model_name=None), + mock.call(['nova-compute/0', 'cinder/1'], model_name=None)] + self.pause_units.assert_has_calls(pause_calls, any_order=False) + action_unit_upgrade_calls = [ + mock.call(['nova-compute/0', 'cinder/1'], model_name=None)] + self.action_unit_upgrade.assert_has_calls( + action_unit_upgrade_calls, + any_order=False) + resume_calls = [ + mock.call(['nova-compute/0', 'cinder/1'], model_name=None), + mock.call(['cinder-hacluster/0'], model_name=None)] + self.resume_units.assert_has_calls(resume_calls, any_order=False) + + def test_action_upgrade_apps_mysql_innodb_cluster(self): + """Verify that mysql-innodb-cluster is settled before complete.""" + self.patch_object(openstack_upgrade, "pause_units") + self.patch_object(openstack_upgrade, "action_unit_upgrade") + self.patch_object(openstack_upgrade, "resume_units") + mock_mysql_innodb_cluster_0 = mock.MagicMock() + mock_mysql_innodb_cluster_0.entity_id = 'mysql-innodb-cluster/0' + units = {'mysql-innodb-cluster': [mock_mysql_innodb_cluster_0]} + self.get_units.side_effect = lambda app, model_name: units[app] + openstack_upgrade.action_upgrade_apps(['mysql-innodb-cluster']) + pause_calls = [ + mock.call(['mysql-innodb-cluster/0'], model_name=None)] + self.pause_units.assert_has_calls(pause_calls, any_order=False) + action_unit_upgrade_calls = [ + mock.call(['mysql-innodb-cluster/0'], model_name=None)] + self.action_unit_upgrade.assert_has_calls( + action_unit_upgrade_calls, + any_order=False) + resume_calls = [ + mock.call(['mysql-innodb-cluster/0'], model_name=None)] + self.resume_units.assert_has_calls(resume_calls, any_order=False) + self.block_until_mysql_innodb_cluster_has_rw.assert_called_once_with( + None) + + def test_set_upgrade_application_config(self): + openstack_upgrade.set_upgrade_application_config( + ['neutron-api', 'cinder'], + 'new-src') + set_app_calls = [ + mock.call( + 'neutron-api', + { + 'openstack-origin': 'new-src', + 'action-managed-upgrade': 'True'}, + model_name=None), + mock.call( + 'cinder', + { + 'openstack-origin': 'new-src', + 'action-managed-upgrade': 'True'}, + model_name=None)] + self.set_application_config.assert_has_calls(set_app_calls) + + self.set_application_config.reset_mock() + openstack_upgrade.set_upgrade_application_config( + ['percona-cluster'], + 'new-src', + action_managed=False) + self.set_application_config.assert_called_once_with( + 'percona-cluster', + {'source': 'new-src'}, + model_name=None) + + def test_is_action_upgradable(self): + self.assertTrue( + openstack_upgrade.is_action_upgradable('cinder')) + self.assertFalse( + openstack_upgrade.is_action_upgradable('percona-cluster')) + + def test_is_already_upgraded(self): + self.assertTrue( + openstack_upgrade.is_already_upgraded('cinder', 'old-src')) + self.assertFalse( + openstack_upgrade.is_already_upgraded('cinder', 'new-src')) + + def test_run_action_upgrade(self): + self.patch_object(openstack_upgrade, "set_upgrade_application_config") + self.patch_object(openstack_upgrade, "action_upgrade_apps") + openstack_upgrade.run_action_upgrades( + ['cinder', 'neutron-api'], + 'new-src') + self.set_upgrade_application_config.assert_called_once_with( + ['cinder', 'neutron-api'], + 'new-src', + model_name=None) + self.action_upgrade_apps.assert_called_once_with( + ['cinder', 'neutron-api'], + model_name=None) + + def test_run_all_in_one_upgrade(self): + self.patch_object(openstack_upgrade, "set_upgrade_application_config") + self.patch_object( + openstack_upgrade.zaza.model, + 'block_until_all_units_idle') + openstack_upgrade.run_all_in_one_upgrades( + ['percona-cluster'], + 'new-src') + self.set_upgrade_application_config.assert_called_once_with( + ['percona-cluster'], + 'new-src', + action_managed=False, + model_name=None) + self.block_until_all_units_idle.assert_called_once_with() + + def test_run_upgrade(self): + self.patch_object(openstack_upgrade, "run_all_in_one_upgrades") + self.patch_object(openstack_upgrade, "run_action_upgrades") + openstack_upgrade.run_upgrade_on_apps( + ['cinder', 'neutron-api', 'ceph-mon'], + 'new-src') + self.run_all_in_one_upgrades.assert_called_once_with( + ['ceph-mon'], + 'new-src', + model_name=None) + self.run_action_upgrades.assert_called_once_with( + ['cinder', 'neutron-api'], + 'new-src', + model_name=None) + + def test_run_upgrade_tests(self): + self.patch_object(openstack_upgrade, "run_upgrade_on_apps") + self.patch_object(openstack_upgrade, "get_upgrade_groups") + self.get_upgrade_groups.return_value = [ + ('Compute', ['nova-compute']), + ('Control Plane', ['cinder', 'neutron-api']), + ('Core Identity', ['keystone']), + ('Storage', ['ceph-mon']), + ('sweep_up', ['designate'])] + openstack_upgrade.run_upgrade_tests('new-src', model_name=None) + run_upgrade_calls = [ + mock.call(['nova-compute'], 'new-src', model_name=None), + mock.call(['cinder', 'neutron-api'], 'new-src', model_name=None), + mock.call(['keystone'], 'new-src', model_name=None), + mock.call(['ceph-mon'], 'new-src', model_name=None), + mock.call(['designate'], 'new-src', model_name=None), + ] + self.run_upgrade_on_apps.assert_has_calls( + run_upgrade_calls, any_order=False) diff --git a/unit_tests/utilities/test_zaza_utilities_parallel_series_upgrade.py b/unit_tests/utilities/test_zaza_utilities_parallel_series_upgrade.py new file mode 100644 index 0000000..70ffd3b --- /dev/null +++ b/unit_tests/utilities/test_zaza_utilities_parallel_series_upgrade.py @@ -0,0 +1,586 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +import sys +import unittest +import unit_tests.utils as ut_utils +import zaza.openstack.utilities.generic as generic_utils +import zaza.openstack.utilities.series_upgrade as series_upgrade +import zaza.openstack.utilities.parallel_series_upgrade as upgrade_utils + +FAKE_STATUS = { + 'can-upgrade-to': '', + 'charm': 'local:trusty/app-136', + 'subordinate-to': [], + 'units': {'app/0': {'leader': True, + 'machine': '0', + 'subordinates': { + 'app-hacluster/0': { + 'charm': 'local:trusty/hacluster-0', + 'leader': True}}}, + 'app/1': {'machine': '1', + 'subordinates': { + 'app-hacluster/1': { + 'charm': 'local:trusty/hacluster-0'}}}, + 'app/2': {'machine': '2', + 'subordinates': { + 'app-hacluster/2': { + 'charm': 'local:trusty/hacluster-0'}}}}} + +FAKE_STATUS_MONGO = { + 'can-upgrade-to': '', + 'charm': 'local:trusty/mongodb-10', + 'subordinate-to': [], + 'units': {'mongo/0': {'leader': True, + 'machine': '0', + 'subordinates': {}}, + 'mongo/1': {'machine': '1', + 'subordinates': {}}, + 'mongo/2': {'machine': '2', + 'subordinates': {}}}} + + +class Test_ParallelSeriesUpgradeSync(ut_utils.BaseTestCase): + def setUp(self): + super(Test_ParallelSeriesUpgradeSync, self).setUp() + # Juju Status Object and data + # self.juju_status = mock.MagicMock() + # self.juju_status.applications.__getitem__.return_value = FAKE_STATUS + # self.patch_object(upgrade_utils, "model") + # self.model.get_status.return_value = self.juju_status + + def test_get_leader_and_non_leaders(self): + expected = ({ + 'app/0': { + 'leader': True, + 'machine': '0', + 'subordinates': { + 'app-hacluster/0': { + 'charm': 'local:trusty/hacluster-0', + 'leader': True}}}}, { + 'app/1': { + 'machine': '1', + 'subordinates': { + 'app-hacluster/1': { + 'charm': 'local:trusty/hacluster-0'}}}, + 'app/2': { + 'machine': '2', + 'subordinates': { + 'app-hacluster/2': { + 'charm': 'local:trusty/hacluster-0'}}}}) + + self.assertEqual( + expected, + upgrade_utils.get_leader_and_non_leaders(FAKE_STATUS) + ) + + def test_app_config_openstack_charm(self): + expected = { + 'origin': 'openstack-origin', + 'pause_non_leader_subordinate': True, + 'pause_non_leader_primary': True, + 'post_upgrade_functions': [], + 'pre_upgrade_functions': [], + 'post_application_upgrade_functions': [], + 'follower_first': False, } + config = upgrade_utils.app_config('keystone') + self.assertEqual(expected, config) + + def test_app_config_mongo(self): + expected = { + 'origin': None, + 'pause_non_leader_subordinate': True, + 'pause_non_leader_primary': True, + 'post_upgrade_functions': [], + 'pre_upgrade_functions': [], + 'post_application_upgrade_functions': [], + 'follower_first': True, } + config = upgrade_utils.app_config('mongodb') + self.assertEqual(expected, config) + + def test_app_config_ceph(self): + expected = { + 'origin': 'source', + 'pause_non_leader_subordinate': False, + 'pause_non_leader_primary': False, + 'post_upgrade_functions': [], + 'pre_upgrade_functions': [], + 'post_application_upgrade_functions': [], + 'follower_first': False, } + config = upgrade_utils.app_config('ceph-mon') + self.assertEqual(expected, config) + + def test_app_config_percona(self): + expected = { + 'origin': 'source', + 'pause_non_leader_subordinate': True, + 'pause_non_leader_primary': True, + 'post_upgrade_functions': [], + 'pre_upgrade_functions': [], + 'post_application_upgrade_functions': [ + ('zaza.openstack.charm_tests.mysql.utils.' + 'complete_cluster_series_upgrade') + ], + 'follower_first': False, } + config = upgrade_utils.app_config('percona-cluster') + self.assertEqual(expected, config) + + +class TestParallelSeriesUpgrade(ut_utils.AioTestCase): + def setUp(self): + super(TestParallelSeriesUpgrade, self).setUp() + if sys.version_info < (3, 6, 0): + raise unittest.SkipTest("Can't AsyncMock in py35") + self.patch_object(series_upgrade, "async_prepare_series_upgrade") + self.patch_object(generic_utils, 'check_call') + # Juju Status Object and data + + self.juju_status = mock.AsyncMock() + self.juju_status.return_value.applications.__getitem__.return_value = \ + FAKE_STATUS + self.patch_object(upgrade_utils, "model") + self.model.async_get_status = self.juju_status + self.async_run_action = mock.AsyncMock() + self.model.async_run_action = self.async_run_action + + self.async_block_until = mock.AsyncMock() + self.model.async_block_until = self.async_block_until + self.model.async_wait_for_unit_idle = mock.AsyncMock() + self.async_run_on_machine = mock.AsyncMock() + self.model.async_run_on_machine = self.async_run_on_machine + self.model.async_block_until_units_on_machine_are_idle = \ + mock.AsyncMock() + + @mock.patch.object(upgrade_utils.cl_utils, 'get_class') + async def test_run_post_application_upgrade_functions( + self, + mock_get_class + ): + called = mock.AsyncMock() + mock_get_class.return_value = called + await upgrade_utils.run_post_application_upgrade_functions( + ['my.thing']) + mock_get_class.assert_called_once_with('my.thing') + called.assert_called() + + @mock.patch.object(upgrade_utils.cl_utils, 'get_class') + async def test_run_pre_upgrade_functions(self, mock_get_class): + called = mock.AsyncMock() + mock_get_class.return_value = called + await upgrade_utils.run_pre_upgrade_functions('1', ['my.thing']) + mock_get_class.assert_called_once_with('my.thing') + called.assert_called_once_with('1') + + @mock.patch.object(upgrade_utils, 'run_post_application_upgrade_functions') + @mock.patch.object( + upgrade_utils.series_upgrade_utils, 'async_prepare_series_upgrade') + @mock.patch.object(upgrade_utils.series_upgrade_utils, 'async_set_series') + @mock.patch.object(upgrade_utils, 'maybe_pause_things') + @mock.patch.object(upgrade_utils, 'series_upgrade_machine') + async def test_parallel_series_upgrade_mongo( + self, + mock_series_upgrade_machine, + mock_maybe_pause_things, + mock_async_set_series, + mock_async_prepare_series_upgrade, + mock_post_application_upgrade_functions, + ): + self.juju_status.return_value.applications.__getitem__.return_value = \ + FAKE_STATUS_MONGO + upgrade_config = upgrade_utils.app_config('mongodb') + await upgrade_utils.parallel_series_upgrade( + 'mongodb', + from_series='trusty', + to_series='xenial', + **upgrade_config + ) + mock_async_set_series.assert_called_once_with( + 'mongodb', to_series='xenial') + self.juju_status.assert_called() + + # The below is using `any_order=True` because the ordering is + # undetermined and differs between python versions + mock_async_prepare_series_upgrade.assert_has_calls([ + mock.call('1', to_series='xenial'), + mock.call('2', to_series='xenial'), + mock.call('0', to_series='xenial'), + ], any_order=True) + mock_maybe_pause_things.assert_called() + mock_series_upgrade_machine.assert_has_calls([ + mock.call( + '1', + origin=None, + application='mongodb', + files=None, + workaround_script=None, + post_upgrade_functions=[]), + mock.call( + '2', + origin=None, + application='mongodb', + files=None, + workaround_script=None, + post_upgrade_functions=[]), + mock.call( + '0', + origin=None, + application='mongodb', + files=None, + workaround_script=None, + post_upgrade_functions=[]), + ]) + mock_post_application_upgrade_functions.assert_called_once_with([]) + + @mock.patch.object(upgrade_utils, 'run_post_application_upgrade_functions') + @mock.patch.object( + upgrade_utils.series_upgrade_utils, 'async_prepare_series_upgrade') + @mock.patch.object(upgrade_utils.series_upgrade_utils, 'async_set_series') + @mock.patch.object(upgrade_utils, 'maybe_pause_things') + @mock.patch.object(upgrade_utils, 'series_upgrade_machine') + async def test_serial_series_upgrade_mongo( + self, + mock_series_upgrade_machine, + mock_maybe_pause_things, + mock_async_set_series, + mock_async_prepare_series_upgrade, + mock_post_application_upgrade_functions, + ): + self.juju_status.return_value.applications.__getitem__.return_value = \ + FAKE_STATUS_MONGO + upgrade_config = upgrade_utils.app_config('mongodb') + await upgrade_utils.serial_series_upgrade( + 'mongodb', + from_series='trusty', + to_series='xenial', + **upgrade_config + ) + mock_async_set_series.assert_called_once_with( + 'mongodb', to_series='xenial') + self.juju_status.assert_called() + mock_async_prepare_series_upgrade.assert_has_calls([ + mock.call('1', to_series='xenial'), + mock.call('2', to_series='xenial'), + mock.call('0', to_series='xenial'), + ]) + mock_maybe_pause_things.assert_called() + mock_series_upgrade_machine.assert_has_calls([ + mock.call( + '1', + origin=None, + application='mongodb', + files=None, + workaround_script=None, + post_upgrade_functions=[]), + mock.call( + '2', + origin=None, + application='mongodb', + files=None, + workaround_script=None, + post_upgrade_functions=[]), + mock.call( + '0', + origin=None, + application='mongodb', + files=None, + workaround_script=None, + post_upgrade_functions=[]), + ]) + mock_post_application_upgrade_functions.assert_called_once_with([]) + + @mock.patch.object(upgrade_utils, 'run_post_application_upgrade_functions') + @mock.patch.object( + upgrade_utils.series_upgrade_utils, 'async_prepare_series_upgrade') + @mock.patch.object(upgrade_utils.series_upgrade_utils, 'async_set_series') + @mock.patch.object(upgrade_utils, 'maybe_pause_things') + @mock.patch.object(upgrade_utils, 'series_upgrade_machine') + async def test_parallel_series_upgrade( + self, + mock_series_upgrade_machine, + mock_maybe_pause_things, + mock_async_set_series, + mock_async_prepare_series_upgrade, + mock_post_application_upgrade_functions, + ): + await upgrade_utils.parallel_series_upgrade( + 'app', + from_series='trusty', + to_series='xenial', + ) + mock_async_set_series.assert_called_once_with( + 'app', to_series='xenial') + self.juju_status.assert_called() + # The below is using `any_order=True` because the ordering is + # undetermined and differs between python versions + mock_async_prepare_series_upgrade.assert_has_calls([ + mock.call('1', to_series='xenial'), + mock.call('2', to_series='xenial'), + mock.call('0', to_series='xenial'), + ], any_order=True) + mock_maybe_pause_things.assert_called() + mock_series_upgrade_machine.assert_has_calls([ + mock.call( + '1', + origin='openstack-origin', + application='app', + files=None, + workaround_script=None, + post_upgrade_functions=None), + mock.call( + '2', + origin='openstack-origin', + application='app', + files=None, + workaround_script=None, + post_upgrade_functions=None), + mock.call( + '0', + origin='openstack-origin', + application='app', + files=None, + workaround_script=None, + post_upgrade_functions=None), + ]) + mock_post_application_upgrade_functions.assert_called_once_with(None) + + @mock.patch.object(upgrade_utils, 'run_post_application_upgrade_functions') + @mock.patch.object( + upgrade_utils.series_upgrade_utils, 'async_prepare_series_upgrade') + @mock.patch.object(upgrade_utils.series_upgrade_utils, 'async_set_series') + @mock.patch.object(upgrade_utils, 'maybe_pause_things') + @mock.patch.object(upgrade_utils, 'series_upgrade_machine') + async def test_serial_series_upgrade( + self, + mock_series_upgrade_machine, + mock_maybe_pause_things, + mock_async_set_series, + mock_async_prepare_series_upgrade, + mock_post_application_upgrade_functions, + ): + await upgrade_utils.serial_series_upgrade( + 'app', + from_series='trusty', + to_series='xenial', + ) + mock_async_set_series.assert_called_once_with( + 'app', to_series='xenial') + self.juju_status.assert_called() + mock_async_prepare_series_upgrade.assert_has_calls([ + mock.call('0', to_series='xenial'), + mock.call('1', to_series='xenial'), + mock.call('2', to_series='xenial'), + ]) + mock_maybe_pause_things.assert_called() + mock_series_upgrade_machine.assert_has_calls([ + mock.call( + '0', + origin='openstack-origin', + application='app', + files=None, + workaround_script=None, + post_upgrade_functions=None), + mock.call( + '1', + origin='openstack-origin', + application='app', + files=None, + workaround_script=None, + post_upgrade_functions=None), + mock.call( + '2', + origin='openstack-origin', + application='app', + files=None, + workaround_script=None, + post_upgrade_functions=None), + ]) + mock_post_application_upgrade_functions.assert_called_once_with(None) + + @mock.patch.object(upgrade_utils, 'add_confdef_file') + @mock.patch.object(upgrade_utils, 'remove_confdef_file') + @mock.patch.object( + upgrade_utils.series_upgrade_utils, 'async_complete_series_upgrade') + @mock.patch.object(upgrade_utils, 'reboot') + @mock.patch.object(upgrade_utils, 'async_do_release_upgrade') + @mock.patch.object(upgrade_utils, 'async_dist_upgrade') + async def test_series_upgrade_machine( + self, + mock_async_dist_upgrade, + mock_async_do_release_upgrade, + mock_reboot, + mock_async_complete_series_upgrade, + mock_remove_confdef_file, + mock_add_confdef_file + ): + await upgrade_utils.series_upgrade_machine( + '1', + post_upgrade_functions=None, + pre_upgrade_functions=None, + files=None, + workaround_script=None) + mock_async_dist_upgrade.assert_called_once_with('1') + mock_async_do_release_upgrade.assert_called_once_with('1') + mock_reboot.assert_called_once_with('1') + mock_async_complete_series_upgrade.assert_called_once_with('1') + mock_remove_confdef_file.assert_called_once_with('1') + mock_add_confdef_file.assert_called_once_with('1') + + @mock.patch.object(upgrade_utils, 'add_confdef_file') + @mock.patch.object(upgrade_utils, 'remove_confdef_file') + @mock.patch.object(upgrade_utils.os_utils, 'async_set_origin') + @mock.patch.object( + upgrade_utils.series_upgrade_utils, 'async_complete_series_upgrade') + @mock.patch.object(upgrade_utils, 'reboot') + @mock.patch.object(upgrade_utils, 'async_do_release_upgrade') + @mock.patch.object(upgrade_utils, 'async_dist_upgrade') + async def test_series_upgrade_machine_with_source( + self, + mock_async_dist_upgrade, + mock_async_do_release_upgrade, + mock_reboot, + mock_async_complete_series_upgrade, + mock_async_set_origin, + mock_remove_confdef_file, + mock_add_confdef_file + ): + await upgrade_utils.series_upgrade_machine( + '1', + origin='openstack-origin', + application='app', + post_upgrade_functions=None, + pre_upgrade_functions=None, + files=None, + workaround_script=None) + mock_async_dist_upgrade.assert_called_once_with('1') + mock_async_do_release_upgrade.assert_called_once_with('1') + mock_reboot.assert_called_once_with('1') + mock_async_complete_series_upgrade.assert_called_once_with('1') + mock_async_set_origin.assert_called_once_with( + 'app', 'openstack-origin') + mock_remove_confdef_file.assert_called_once_with('1') + mock_add_confdef_file.assert_called_once_with('1') + + @mock.patch("asyncio.gather") + async def test_maybe_pause_things_primary(self, mock_gather): + async def _gather(*args): + for f in args: + await f + + mock_gather.side_effect = _gather + await upgrade_utils.maybe_pause_things( + FAKE_STATUS, + ['app/1', 'app/2'], + pause_non_leader_subordinate=False, + pause_non_leader_primary=True) + self.async_run_action.assert_has_calls([ + mock.call('app/1', "pause", action_params={}), + mock.call('app/2', "pause", action_params={}), + ]) + + @mock.patch("asyncio.gather") + async def test_maybe_pause_things_subordinates(self, mock_gather): + async def _gather(*args): + for f in args: + await f + + mock_gather.side_effect = _gather + await upgrade_utils.maybe_pause_things( + FAKE_STATUS, + ['app/1', 'app/2'], + pause_non_leader_subordinate=True, + pause_non_leader_primary=False) + self.async_run_action.assert_has_calls([ + mock.call('app-hacluster/1', "pause", action_params={}), + mock.call('app-hacluster/2', "pause", action_params={}), + ]) + + @mock.patch("asyncio.gather") + async def test_maybe_pause_things_all(self, mock_gather): + async def _gather(*args): + for f in args: + await f + + mock_gather.side_effect = _gather + await upgrade_utils.maybe_pause_things( + FAKE_STATUS, + ['app/1', 'app/2'], + pause_non_leader_subordinate=True, + pause_non_leader_primary=True) + self.async_run_action.assert_has_calls([ + mock.call('app-hacluster/1', "pause", action_params={}), + mock.call('app/1', "pause", action_params={}), + mock.call('app-hacluster/2', "pause", action_params={}), + mock.call('app/2', "pause", action_params={}), + ]) + + async def test_maybe_pause_things_none(self): + await upgrade_utils.maybe_pause_things( + FAKE_STATUS, + ['app/1', 'app/2'], + pause_non_leader_subordinate=False, + pause_non_leader_primary=False) + self.async_run_action.assert_not_called() + + async def test_add_confdef_file(self): + await upgrade_utils.add_confdef_file('1') + cmd = ( + """echo """ + """'DPkg::options { "--force-confdef"; "--force-confnew"; }' | """ + """sudo tee /etc/apt/apt.conf.d/local""" + ) + self.async_run_on_machine.assert_called_once_with( + '1', cmd + ) + + async def test_remove_confdef_file(self): + await upgrade_utils.remove_confdef_file('1') + self.async_run_on_machine.assert_called_once_with( + '1', 'sudo rm /etc/apt/apt.conf.d/local' + ) + + async def test_async_do_release_upgrade(self): + await upgrade_utils.async_do_release_upgrade('1') + do_release_upgrade_cmd = ( + 'yes | sudo DEBIAN_FRONTEND=noninteractive ' + 'do-release-upgrade -f DistUpgradeViewNonInteractive') + self.async_run_on_machine.assert_called_once_with( + '1', do_release_upgrade_cmd, timeout='120m' + ) + + async def test_prepare_series_upgrade(self): + await upgrade_utils.prepare_series_upgrade( + '1', to_series='xenial' + ) + self.async_prepare_series_upgrade.assert_called_once_with( + '1', to_series='xenial' + ) + + async def test_reboot(self): + await upgrade_utils.reboot('1') + self.async_run_on_machine.assert_called_once_with( + '1', 'sudo init 6 & exit' + ) + + async def test_async_dist_upgrade(self): + await upgrade_utils.async_dist_upgrade('1') + apt_update_command = ( + """yes | sudo DEBIAN_FRONTEND=noninteractive """ + """apt-get --assume-yes """ + """-o "Dpkg::Options::=--force-confdef" """ + """-o "Dpkg::Options::=--force-confold" dist-upgrade""") + self.async_run_on_machine.assert_has_calls([ + mock.call('1', 'sudo apt-get update'), + mock.call('1', apt_update_command), + ]) diff --git a/unit_tests/utilities/test_zaza_utilities_series_upgrade.py b/unit_tests/utilities/test_zaza_utilities_series_upgrade.py new file mode 100644 index 0000000..340fc59 --- /dev/null +++ b/unit_tests/utilities/test_zaza_utilities_series_upgrade.py @@ -0,0 +1,275 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import mock +import unit_tests.utils as ut_utils +import zaza.openstack.utilities.generic as generic_utils +import zaza.openstack.utilities.series_upgrade as series_upgrade_utils + +FAKE_STATUS = { + 'can-upgrade-to': '', + 'charm': 'local:trusty/app-136', + 'subordinate-to': [], + 'units': {'app/0': {'leader': True, + 'machine': '0', + 'subordinates': { + 'app-hacluster/0': { + 'charm': 'local:trusty/hacluster-0', + 'leader': True}}}, + 'app/1': {'machine': '1', + 'subordinates': { + 'app-hacluster/1': { + 'charm': 'local:trusty/hacluster-0'}}}, + 'app/2': {'machine': '2', + 'subordinates': { + 'app-hacluster/2': { + 'charm': 'local:trusty/hacluster-0'}}}}} + + +class TestSeriesUpgrade(ut_utils.BaseTestCase): + def setUp(self): + super(TestSeriesUpgrade, self).setUp() + # Patch all subprocess calls + self.patch( + 'zaza.openstack.utilities.generic.subprocess', + new_callable=mock.MagicMock(), + name='subprocess' + ) + self.patch_object(generic_utils, "run_via_ssh") + # Juju Status Object and data + self.juju_status = mock.MagicMock() + self.juju_status.applications.__getitem__.return_value = FAKE_STATUS + self.patch_object(series_upgrade_utils, "model") + self.model.get_status.return_value = self.juju_status + + def test_series_upgrade(self): + self.patch_object( + series_upgrade_utils.model, "block_until_all_units_idle") + self.patch_object( + series_upgrade_utils.model, "block_until_unit_wl_status") + self.patch_object(series_upgrade_utils.model, "prepare_series_upgrade") + self.patch_object( + series_upgrade_utils.model, "complete_series_upgrade") + self.patch_object(series_upgrade_utils.model, "set_series") + self.patch_object(generic_utils, "set_origin") + self.patch_object(series_upgrade_utils, "wrap_do_release_upgrade") + self.patch_object(generic_utils, "reboot") + _unit = "app/2" + _application = "app" + _machine_num = "4" + _from_series = "xenial" + _to_series = "bionic" + _origin = "source" + _files = ["filename", "scriptname"] + _workaround_script = "scriptname" + series_upgrade_utils.series_upgrade( + _unit, _machine_num, origin=_origin, + to_series=_to_series, from_series=_from_series, + workaround_script=_workaround_script, files=_files) + self.block_until_all_units_idle.called_with() + self.prepare_series_upgrade.assert_called_once_with( + _machine_num, to_series=_to_series) + self.wrap_do_release_upgrade.assert_called_once_with( + _unit, to_series=_to_series, from_series=_from_series, + workaround_script=_workaround_script, files=_files) + self.complete_series_upgrade.assert_called_once_with(_machine_num) + self.set_series.assert_called_once_with(_application, _to_series) + self.set_origin.assert_called_once_with(_application, _origin) + self.reboot.assert_called_once_with(_unit) + + def test_series_upgrade_application_pause_peers_and_subordinates(self): + self.patch_object(series_upgrade_utils.model, "run_action") + self.patch_object(series_upgrade_utils, "series_upgrade") + _application = "app" + _from_series = "xenial" + _to_series = "bionic" + _origin = "source" + _files = ["filename", "scriptname"] + _workaround_script = "scriptname" + _completed_machines = [] + # Peers and Subordinates + _run_action_calls = [ + mock.call("{}-hacluster/1".format(_application), + "pause", action_params={}), + mock.call("{}/1".format(_application), "pause", action_params={}), + mock.call("{}-hacluster/2".format(_application), + "pause", action_params={}), + mock.call("{}/2".format(_application), "pause", action_params={}), + ] + _series_upgrade_calls = [] + for machine_num in ("0", "1", "2"): + _series_upgrade_calls.append( + mock.call("{}/{}".format(_application, machine_num), + machine_num, origin=_origin, + from_series=_from_series, to_series=_to_series, + workaround_script=_workaround_script, files=_files, + post_upgrade_functions=None), + ) + + # Pause primary peers and subordinates + series_upgrade_utils.series_upgrade_application( + _application, origin=_origin, + to_series=_to_series, from_series=_from_series, + pause_non_leader_primary=True, + pause_non_leader_subordinate=True, + completed_machines=_completed_machines, + workaround_script=_workaround_script, files=_files), + self.run_action.assert_has_calls(_run_action_calls) + self.series_upgrade.assert_has_calls(_series_upgrade_calls) + + def test_series_upgrade_application_pause_subordinates(self): + self.patch_object(series_upgrade_utils.model, "run_action") + self.patch_object(series_upgrade_utils, "series_upgrade") + _application = "app" + _from_series = "xenial" + _to_series = "bionic" + _origin = "source" + _files = ["filename", "scriptname"] + _workaround_script = "scriptname" + _completed_machines = [] + # Subordinates only + _run_action_calls = [ + mock.call("{}-hacluster/1".format(_application), + "pause", action_params={}), + mock.call("{}-hacluster/2".format(_application), + "pause", action_params={}), + ] + _series_upgrade_calls = [] + + for machine_num in ("0", "1", "2"): + _series_upgrade_calls.append( + mock.call("{}/{}".format(_application, machine_num), + machine_num, origin=_origin, + from_series=_from_series, to_series=_to_series, + workaround_script=_workaround_script, files=_files, + post_upgrade_functions=None), + ) + + # Pause subordinates + series_upgrade_utils.series_upgrade_application( + _application, origin=_origin, + to_series=_to_series, from_series=_from_series, + pause_non_leader_primary=False, + pause_non_leader_subordinate=True, + completed_machines=_completed_machines, + workaround_script=_workaround_script, files=_files), + self.run_action.assert_has_calls(_run_action_calls) + self.series_upgrade.assert_has_calls(_series_upgrade_calls) + + def test_series_upgrade_application_no_pause(self): + self.patch_object(series_upgrade_utils.model, "run_action") + self.patch_object(series_upgrade_utils, "series_upgrade") + _application = "app" + _from_series = "xenial" + _to_series = "bionic" + _origin = "source" + _series_upgrade_calls = [] + _files = ["filename", "scriptname"] + _workaround_script = "scriptname" + _completed_machines = [] + + for machine_num in ("0", "1", "2"): + _series_upgrade_calls.append( + mock.call("{}/{}".format(_application, machine_num), + machine_num, origin=_origin, + from_series=_from_series, to_series=_to_series, + workaround_script=_workaround_script, files=_files, + post_upgrade_functions=None), + ) + + # No Pausiing + series_upgrade_utils.series_upgrade_application( + _application, origin=_origin, + to_series=_to_series, from_series=_from_series, + pause_non_leader_primary=False, + pause_non_leader_subordinate=False, + completed_machines=_completed_machines, + workaround_script=_workaround_script, files=_files) + self.run_action.assert_not_called() + self.series_upgrade.assert_has_calls(_series_upgrade_calls) + + def test_dist_upgrade(self): + _unit = "app/2" + series_upgrade_utils.dist_upgrade(_unit) + dist_upgrade_cmd = ( + """sudo DEBIAN_FRONTEND=noninteractive apt --assume-yes """ + """-o "Dpkg::Options::=--force-confdef" """ + """-o "Dpkg::Options::=--force-confold" dist-upgrade""") + self.model.run_on_unit.assert_has_calls([ + mock.call(_unit, 'sudo apt update'), + mock.call(_unit, dist_upgrade_cmd)]) + + def test_do_release_upgrade(self): + _unit = "app/2" + series_upgrade_utils.do_release_upgrade(_unit) + self.run_via_ssh.assert_called_once_with( + _unit, + 'DEBIAN_FRONTEND=noninteractive do-release-upgrade ' + '-f DistUpgradeViewNonInteractive') + + def test_wrap_do_release_upgrade(self): + self.patch_object(series_upgrade_utils, "do_release_upgrade") + self.patch_object(series_upgrade_utils.model, "scp_to_unit") + _unit = "app/2" + _from_series = "xenial" + _to_series = "bionic" + _workaround_script = "scriptname" + _files = ["filename", _workaround_script] + _scp_calls = [] + _run_calls = [ + mock.call(_unit, _workaround_script)] + for filename in _files: + _scp_calls.append(mock.call(_unit, filename, filename)) + series_upgrade_utils.wrap_do_release_upgrade( + _unit, to_series=_to_series, from_series=_from_series, + workaround_script=_workaround_script, files=_files) + self.scp_to_unit.assert_has_calls(_scp_calls) + self.run_via_ssh.assert_has_calls(_run_calls) + self.do_release_upgrade.assert_called_once_with(_unit) + + def test_app_config_openstack_charm(self): + upgrade = series_upgrade_utils.async_series_upgrade_application + expected = { + 'origin': 'openstack-origin', + 'pause_non_leader_subordinate': True, + 'pause_non_leader_primary': True, + 'upgrade_function': upgrade, + 'post_upgrade_functions': [], + } + config = series_upgrade_utils.app_config('keystone') + self.assertEqual(expected, config) + + def test_app_config_mongo(self): + upgrade = series_upgrade_utils.async_series_upgrade_non_leaders_first + expected = { + 'origin': None, + 'pause_non_leader_subordinate': True, + 'pause_non_leader_primary': True, + 'upgrade_function': upgrade, + 'post_upgrade_functions': [], + } + config = series_upgrade_utils.app_config('mongodb') + self.assertEqual(expected, config) + + def test_app_config_ceph(self): + upgrade = series_upgrade_utils.async_series_upgrade_application + expected = { + 'origin': 'source', + 'pause_non_leader_subordinate': False, + 'pause_non_leader_primary': False, + 'upgrade_function': upgrade, + 'post_upgrade_functions': [], + } + config = series_upgrade_utils.app_config('ceph-mon') + self.assertEqual(expected, config) diff --git a/unit_tests/utilities/test_zaza_utilities_swift.py b/unit_tests/utilities/test_zaza_utilities_swift.py new file mode 100644 index 0000000..c5fc3da --- /dev/null +++ b/unit_tests/utilities/test_zaza_utilities_swift.py @@ -0,0 +1,187 @@ +import copy +import mock +import unit_tests.utils as ut_utils +import uuid + +import zaza.model +import zaza.openstack.utilities.swift as swift_utils +import zaza.openstack.utilities.juju as juju_utils + +import unit_tests.utilities.swift_test_data as swift_test_data + + +class TestSwiftUtils(ut_utils.BaseTestCase): + + def setUp(self): + super(TestSwiftUtils, self).setUp() + + def test_ObjectReplica_init(self): + obj_rep = swift_utils.ObjectReplica( + "Server:Port Device 10.5.0.38:6000 loop0") + self.assertEqual( + obj_rep.server, + "10.5.0.38") + self.assertEqual( + obj_rep.port, + "6000") + self.assertEqual( + obj_rep.device, + "loop0") + self.assertFalse(obj_rep.handoff_device) + obj_rep = swift_utils.ObjectReplica( + "Server:Port Device 10.5.0.9:6000 loop0 [Handoff]") + self.assertTrue(obj_rep.handoff_device) + + def test_ObjectReplicas(self): + self.patch_object(zaza.model, 'run_on_leader') + self.run_on_leader.return_value = { + 'Stdout': swift_test_data.SWIFT_GET_NODES_STDOUT} + obj_replicas = swift_utils.ObjectReplicas( + 'swift-proxy-region1', + 'account123', + 'my-container', + 'my-object', + swift_test_data.STORAGE_TOPOLOGY, + 'my-model') + self.assertEqual( + sorted(obj_replicas.hand_off_ips), + ['10.5.0.15', '10.5.0.18', '10.5.0.34', '10.5.0.9']) + self.assertEqual( + sorted(obj_replicas.storage_ips), + ['10.5.0.38', '10.5.0.4']) + self.assertEqual( + obj_replicas.placements, + [ + { + 'app_name': 'swift-storage-region2-zone3', + 'region': 2, + 'unit': 'swift-storage-region2-zone3/0', + 'zone': 3}, + { + 'app_name': 'swift-storage-region1-zone3', + 'region': 1, + 'unit': 'swift-storage-region1-zone3/0', + 'zone': 3}]) + self.assertEqual( + obj_replicas.distinct_regions, + [1, 2]) + self.assertEqual( + sorted(obj_replicas.all_zones), + [(1, 3), (2, 3)]) + self.assertEqual( + sorted(obj_replicas.distinct_zones), + [(1, 3), (2, 3)]) + + def test_get_swift_storage_topology(self): + unit_r1z1_mock = mock.MagicMock(public_address='10.5.0.18') + unit_r1z2_mock = mock.MagicMock(public_address='10.5.0.34') + unit_r1z3_mock = mock.MagicMock(public_address='10.5.0.4') + unit_r2z1_mock = mock.MagicMock(public_address='10.5.0.9') + unit_r2z2_mock = mock.MagicMock(public_address='10.5.0.15') + unit_r2z3_mock = mock.MagicMock(public_address='10.5.0.38') + app_units = { + 'swift-storage-region1-zone1': [unit_r1z1_mock], + 'swift-storage-region1-zone2': [unit_r1z2_mock], + 'swift-storage-region1-zone3': [unit_r1z3_mock], + 'swift-storage-region2-zone1': [unit_r2z1_mock], + 'swift-storage-region2-zone2': [unit_r2z2_mock], + 'swift-storage-region2-zone3': [unit_r2z3_mock]} + + expected_topology = copy.deepcopy(swift_test_data.STORAGE_TOPOLOGY) + self.patch_object(juju_utils, 'get_full_juju_status') + self.patch_object(zaza.model, 'get_application_config') + self.patch_object(zaza.model, 'get_units') + juju_status = mock.MagicMock() + juju_status.applications = {} + self.get_full_juju_status.return_value = juju_status + + for app_name, units in app_units.items(): + expected_topology[units[0].public_address]['unit'] = units[0] + + app_config = {} + for app_name in app_units.keys(): + juju_status.applications[app_name] = {'charm': 'cs:swift-storage'} + region = int(app_name.split('-')[2].replace('region', '')) + zone = int(app_name.split('-')[3].replace('zone', '')) + app_config[app_name] = { + 'storage-region': {'value': region}, + 'zone': {'value': zone}} + + self.get_application_config.side_effect = \ + lambda x, model_name: app_config[x] + self.get_units.side_effect = lambda x, model_name: app_units[x] + self.assertEqual( + swift_utils.get_swift_storage_topology(), + expected_topology) + + def test_setup_test_container(self): + swift_client = mock.MagicMock() + self.patch_object(uuid, 'uuid1', return_value='auuid') + swift_client.get_account.return_value = ( + {'x-account-project-domain-id': 'domain-id'}, + 'bob-auuid-container') + self.assertEqual( + swift_utils.setup_test_container(swift_client, 'bob'), + ('bob-auuid-container', 'domain-id')) + swift_client.put_container.assert_called_once_with( + 'bob-auuid-container') + + def test_apply_proxy_config(self): + self.patch_object(zaza.model, 'block_until_all_units_idle') + self.patch_object( + zaza.model, + 'get_application_config', + return_value={ + 'go-faster': { + 'value': False}}) + self.patch_object(zaza.model, 'set_application_config') + swift_utils.apply_proxy_config( + 'proxy-app', + {'go-faster': True}) + self.set_application_config.assert_called_once_with( + 'proxy-app', {'go-faster': True}, model_name=None) + + def test_apply_proxy_config_noop(self): + self.patch_object(zaza.model, 'block_until_all_units_idle') + self.patch_object( + zaza.model, + 'get_application_config', + return_value={ + 'go-faster': { + 'value': True}}) + self.patch_object(zaza.model, 'set_application_config') + swift_utils.apply_proxy_config( + 'proxy-app', + {'go-faster': True}) + self.assertFalse(self.set_application_config.called) + + def test_create_object(self): + self.patch_object(swift_utils, 'setup_test_container') + self.setup_test_container.return_value = ('new-container', 'domain-id') + self.patch_object( + swift_utils, + 'ObjectReplicas', + return_value='obj_replicas') + swift_client = mock.MagicMock() + self.assertEqual( + swift_utils.create_object( + swift_client, + 'proxy-app', + swift_test_data.STORAGE_TOPOLOGY, + 'my-prefix'), + ('new-container', 'zaza_test_object.txt', 'obj_replicas')) + self.setup_test_container.assert_called_once_with( + swift_client, + 'my-prefix') + swift_client.put_object.assert_called_once_with( + 'new-container', + 'zaza_test_object.txt', + content_type='text/plain', + contents='File contents') + self.ObjectReplicas.assert_called_once_with( + 'proxy-app', + 'domain-id', + 'new-container', + 'zaza_test_object.txt', + swift_test_data.STORAGE_TOPOLOGY, + model_name=None) diff --git a/unit_tests/utilities/test_zaza_utilities_upgrade_utils.py b/unit_tests/utilities/test_zaza_utilities_upgrade_utils.py new file mode 100644 index 0000000..f42ba5a --- /dev/null +++ b/unit_tests/utilities/test_zaza_utilities_upgrade_utils.py @@ -0,0 +1,131 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import mock +import pprint + +import unit_tests.utils as ut_utils +import zaza.openstack.utilities.upgrade_utils as openstack_upgrade + + +class TestUpgradeUtils(ut_utils.BaseTestCase): + def setUp(self): + super(TestUpgradeUtils, self).setUp() + self.patch_object( + openstack_upgrade.zaza.model, + "get_units") + self.juju_status = mock.MagicMock() + self.patch_object( + openstack_upgrade.zaza.model, + "get_status", + return_value=self.juju_status) + self.patch_object( + openstack_upgrade.zaza.model, + "get_application_config") + + def _get_application_config(app, model_name=None): + app_config = { + 'ceph-mon': {'verbose': True, 'source': 'old-src'}, + 'neutron-openvswitch': {'verbose': True}, + 'ntp': {'verbose': True}, + 'percona-cluster': {'verbose': True, 'source': 'old-src'}, + 'cinder': { + 'verbose': True, + 'openstack-origin': 'old-src', + 'action-managed-upgrade': False}, + 'neutron-api': { + 'verbose': True, + 'openstack-origin': 'old-src', + 'action-managed-upgrade': False}, + 'nova-compute': { + 'verbose': True, + 'openstack-origin': 'old-src', + 'action-managed-upgrade': False}, + } + return app_config[app] + self.get_application_config.side_effect = _get_application_config + self.juju_status.applications = { + 'mydb': { # Filter as it is on UPGRADE_EXCLUDE_LIST + 'charm': 'cs:percona-cluster'}, + 'neutron-openvswitch': { # Filter as it is a subordinates + 'charm': 'cs:neutron-openvswitch', + 'subordinate-to': 'nova-compute'}, + 'ntp': { # Filter as it has no source option + 'charm': 'cs:ntp'}, + 'nova-compute': { + 'charm': 'cs:nova-compute', + 'units': { + 'nova-compute/0': { + 'subordinates': { + 'neutron-openvswitch/2': { + 'charm': 'cs:neutron-openvswitch-22'}}}}}, + 'cinder': { + 'charm': 'cs:cinder-23', + 'units': { + 'cinder/1': { + 'subordinates': { + 'cinder-hacluster/0': { + 'charm': 'cs:hacluster-42'}, + 'cinder-ceph/3': { + 'charm': 'cs:cinder-ceph-2'}}}}}} + + def test_get_upgrade_candidates(self): + expected = copy.deepcopy(self.juju_status.applications) + self.assertEqual( + openstack_upgrade.get_upgrade_candidates(), + expected) + + def test_get_upgrade_groups(self): + expected = [ + ('Database Services', []), + ('Stateful Services', []), + ('Core Identity', []), + ('Control Plane', ['cinder']), + ('Data Plane', ['nova-compute']), + ('sweep_up', [])] + actual = openstack_upgrade.get_upgrade_groups() + pprint.pprint(expected) + pprint.pprint(actual) + self.assertEqual( + actual, + expected) + + def test_get_series_upgrade_groups(self): + expected = [ + ('Database Services', ['mydb']), + ('Stateful Services', []), + ('Core Identity', []), + ('Control Plane', ['cinder']), + ('Data Plane', ['nova-compute']), + ('sweep_up', ['ntp'])] + actual = openstack_upgrade.get_series_upgrade_groups() + pprint.pprint(expected) + pprint.pprint(actual) + self.assertEqual( + actual, + expected) + + def test_extract_charm_name_from_url(self): + self.assertEqual( + openstack_upgrade.extract_charm_name_from_url( + 'local:bionic/heat-12'), + 'heat') + self.assertEqual( + openstack_upgrade.extract_charm_name_from_url( + 'cs:bionic/heat-12'), + 'heat') + self.assertEqual( + openstack_upgrade.extract_charm_name_from_url('cs:heat'), + 'heat') diff --git a/unit_tests/utils.py b/unit_tests/utils.py index 4694d0d..8e31f45 100644 --- a/unit_tests/utils.py +++ b/unit_tests/utils.py @@ -19,6 +19,7 @@ """Module to provide helper for writing unit tests.""" +import asyncio import contextlib import io import mock @@ -96,3 +97,24 @@ class BaseTestCase(unittest.TestCase): started.return_value = return_value self._patches_start[name] = started setattr(self, name, started) + + +class AioTestCase(BaseTestCase): + def __init__(self, methodName='runTest', loop=None): + self.loop = loop or asyncio.get_event_loop() + self._function_cache = {} + super(AioTestCase, self).__init__(methodName=methodName) + + def coroutine_function_decorator(self, func): + def wrapper(*args, **kw): + return self.loop.run_until_complete(func(*args, **kw)) + return wrapper + + def __getattribute__(self, item): + attr = object.__getattribute__(self, item) + if asyncio.iscoroutinefunction(attr) and item.startswith('test_'): + if item not in self._function_cache: + self._function_cache[item] = ( + self.coroutine_function_decorator(attr)) + return self._function_cache[item] + return attr diff --git a/zaza/openstack/charm_tests/aodh/__init__.py b/zaza/openstack/charm_tests/aodh/__init__.py new file mode 100644 index 0000000..2781b7b --- /dev/null +++ b/zaza/openstack/charm_tests/aodh/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing aodh.""" diff --git a/zaza/openstack/charm_tests/aodh/tests.py b/zaza/openstack/charm_tests/aodh/tests.py new file mode 100644 index 0000000..3b966e5 --- /dev/null +++ b/zaza/openstack/charm_tests/aodh/tests.py @@ -0,0 +1,241 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate Aodh testing.""" + +import logging +import tenacity + +import novaclient.exceptions + +import zaza.model +import zaza.openstack.configure.guest +import zaza.openstack.charm_tests.glance.setup as glance_setup +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.generic as generic_utils +import zaza.openstack.utilities.openstack as openstack_utils +import zaza.openstack.configure.telemetry as telemetry_utils + + +class AodhTest(test_utils.OpenStackBaseTest): + """Encapsulate Aodh tests.""" + + RESOURCE_PREFIX = 'zaza-aodhtests' + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(AodhTest, cls).setUpClass(application_name='aodh') + cls.xenial_ocata = openstack_utils.get_os_release('xenial_ocata') + cls.xenial_newton = openstack_utils.get_os_release('xenial_newton') + cls.bionic_stein = openstack_utils.get_os_release('bionic_stein') + cls.release = openstack_utils.get_os_release() + cls.keystone_session = openstack_utils.get_overcloud_keystone_session() + cls.model_name = zaza.model.get_juju_model() + cls.aodh_client = openstack_utils.get_aodh_session_client( + cls.keystone_session) + + @classmethod + def tearDown(cls): + """Remove test resources.""" + logging.info('Running teardown') + cache_wait = False + for alarm in cls.aodh_client.alarm.list(): + if alarm['name'].startswith(cls.RESOURCE_PREFIX): + cache_wait = True + logging.info('Removing Alarm {}'.format(alarm['name'])) + telemetry_utils.delete_alarm( + cls.aodh_client, + alarm['name'], + cache_wait=False) + if cache_wait: + logging.info('Waiting for alarm cache to clear') + telemetry_utils.alarm_cache_wait() + + @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60), + reraise=True, stop=tenacity.stop_after_attempt(8)) + def query_aodh_api(self): + """Check that aodh api is responding.""" + self.aodh_client.alarm.list() + + @tenacity.retry( + retry=tenacity.retry_if_result(lambda ret: ret is not None), + wait=tenacity.wait_fixed(120), + stop=tenacity.stop_after_attempt(2)) + def _retry_check_commands_on_units(self, cmds, units): + return generic_utils.check_commands_on_units(cmds, units) + + @property + def services(self): + """Return a list of the service that should be running.""" + if self.release >= self.xenial_ocata: + services = [ + 'apache2', + 'aodh-evaluator: AlarmEvaluationService worker(0)', + 'aodh-notifier: AlarmNotifierService worker(0)', + ('aodh-listener: EventAlarmEvaluationService' + ' worker(0)')] + elif self.release >= self.xenial_newton: + services = [ + ('/usr/bin/python /usr/bin/aodh-api --port 8032 -- ' + '--config-file=/etc/aodh/aodh.conf ' + '--log-file=/var/log/aodh/aodh-api.log'), + 'aodh-evaluator - AlarmEvaluationService(0)', + 'aodh-notifier - AlarmNotifierService(0)', + 'aodh-listener - EventAlarmEvaluationService(0)'] + else: + services = [ + 'aodh-api', + 'aodh-evaluator', + 'aodh-notifier', + 'aodh-listener'] + return services + + def test_100_test_api(self): + """Check api by creating an alarm.""" + alarm_name = '{}_test_api_alarm'.format(self.RESOURCE_PREFIX) + logging.info('Creating alarm {}'.format(alarm_name)) + alarm = telemetry_utils.create_server_power_off_alarm( + self.aodh_client, + alarm_name, + 'some-uuid') + alarm_state = telemetry_utils.get_alarm_state( + self.aodh_client, + alarm['alarm_id']) + logging.info('alarm_state: {}'.format(alarm_state)) + # Until data is collected alarm come up in an 'insufficient data' + # state. + self.assertEqual(alarm_state, 'insufficient data') + + def test_900_restart_on_config_change(self): + """Checking restart happens on config change. + + Change debug mode and assert that change propagates to the correct + file and that services are restarted as a result + """ + # Config file affected by juju set config change + conf_file = '/etc/aodh/aodh.conf' + + # Make config change, check for service restarts + self.restart_on_changed_debug_oslo_config_file( + conf_file, + self.services) + self.query_aodh_api() + + def test_901_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + """ + with self.pause_resume( + self.services, + pgrep_full=False): + logging.info("Testing pause resume") + self.query_aodh_api() + + def test_902_nrpe_service_checks(self): + """Confirm that the NRPE service check files are created.""" + units = zaza.model.get_units('aodh') + cmds = [] + if self.release >= self.xenial_ocata: + services = ['aodh-evaluator', 'aodh-notifier', + 'aodh-listener', 'apache2'] + else: + services = ['aodh-api', 'aodh-evaluator', + 'aodh-notifier', 'aodh-listener'] + for check_name in services: + cmds.append( + 'egrep -oh /usr/local.* /etc/nagios/nrpe.d/' + 'check_{}.cfg'.format(check_name) + ) + ret = self._retry_check_commands_on_units(cmds, units) + if ret: + logging.info(ret) + self.assertIsNone(ret, msg=ret) + + +class AodhServerAlarmTest(test_utils.OpenStackBaseTest): + """Test server events trigger Aodh alarms.""" + + RESOURCE_PREFIX = 'zaza-aodhtests' + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(AodhServerAlarmTest, cls).setUpClass(application_name='aodh') + cls.aodh_client = openstack_utils.get_aodh_session_client( + cls.keystone_session) + cls.nova_client = openstack_utils.get_nova_session_client( + cls.keystone_session) + cls.run_resource_cleanup = True + + @classmethod + def resource_cleanup(cls): + """Remove test resources.""" + logging.info('Running teardown') + for alarm in cls.aodh_client.alarm.list(): + if alarm['name'].startswith(cls.RESOURCE_PREFIX): + logging.info('Removing Alarm {}'.format(alarm['name'])) + telemetry_utils.delete_alarm( + cls.aodh_client, + alarm['name'], + cache_wait=False) + for server in cls.nova_client.servers.list(): + if server.name.startswith(cls.RESOURCE_PREFIX): + logging.info('Removing server {}'.format(server.name)) + openstack_utils.delete_resource( + cls.nova_client.servers, + server.id, + msg="server") + + def test_alarm_on_power_off(self): + """Test server alarm is triggered when server is powered off.""" + server_name = '{}-server'.format(self.RESOURCE_PREFIX) + alarm_name = '{}_instance_off'.format(self.RESOURCE_PREFIX) + try: + server = self.nova_client.servers.find(name=server_name) + logging.info("Found existing server {}".format(server_name)) + except novaclient.exceptions.NotFound: + logging.info("Launching new server {}".format(server_name)) + server = zaza.openstack.configure.guest.launch_instance( + glance_setup.LTS_IMAGE_NAME, + vm_name=server_name) + assert server.status == 'ACTIVE', "Server {} not active".format( + server.name) + + logging.info('Deleting alarm {} if it exists'.format(alarm_name)) + telemetry_utils.delete_alarm( + self.aodh_client, + alarm_name, + cache_wait=True) + logging.info('Creating alarm {}'.format(alarm_name)) + alarm_info = telemetry_utils.create_server_power_off_alarm( + self.aodh_client, + alarm_name, + server.id) + alarm_state = telemetry_utils.get_alarm_state( + self.aodh_client, + alarm_info['alarm_id']) + logging.info('Alarm in state {}'.format(alarm_state)) + # Until data is collected alarm come up in an 'insufficient data' + # state. + self.assertEqual(alarm_state, 'insufficient data') + logging.info('Stopping server {}'.format(server.name)) + server.stop() + telemetry_utils.block_until_alarm_state( + self.aodh_client, + alarm_info['alarm_id']) diff --git a/zaza/openstack/charm_tests/barbican/__init__.py b/zaza/openstack/charm_tests/barbican/__init__.py new file mode 100644 index 0000000..eecbd79 --- /dev/null +++ b/zaza/openstack/charm_tests/barbican/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing barbican.""" diff --git a/zaza/openstack/charm_tests/barbican/tests.py b/zaza/openstack/charm_tests/barbican/tests.py new file mode 100644 index 0000000..a11ff1f --- /dev/null +++ b/zaza/openstack/charm_tests/barbican/tests.py @@ -0,0 +1,81 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate barbican testing.""" + +import logging + +import barbicanclient.client as barbican_client +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.openstack as openstack_utils + + +class BarbicanTest(test_utils.OpenStackBaseTest): + """Run barbican specific tests.""" + + _SERVICES = ['apache2', 'barbican-worker'] + + def test_110_catalog_endpoints(self): + """Verify that the endpoints are present in the catalog.""" + overcloud_auth = openstack_utils.get_overcloud_auth() + keystone_client = openstack_utils.get_keystone_client( + overcloud_auth) + actual_endpoints = keystone_client.service_catalog.get_endpoints() + for service_type in ('key-manager', 'identity'): + actual_interfaces = [endpoint['interface'] for endpoint in + actual_endpoints[service_type]] + for expected_interface in ('internal', 'admin', 'public'): + assert(expected_interface in actual_interfaces) + + def test_400_api_connection(self): + """Simple api calls to check service is up and responding.""" + logging.info('Authenticating with the barbican endpoint') + overcloud_auth = openstack_utils.get_overcloud_auth() + keystone_client = openstack_utils.get_keystone_client( + overcloud_auth) + keystone_session = openstack_utils.get_overcloud_keystone_session() + barbican_endpoint = keystone_client.service_catalog.url_for( + service_type='key-manager', interface='publicURL') + barbican = barbican_client.Client(session=keystone_session, + endpoint=barbican_endpoint) + + logging.info('Creating a secret') + my_secret = barbican.secrets.create() + my_secret.name = u'Random plain text password' + my_secret.payload = u'password' + + logging.info('Storing the secret') + my_secret_ref = my_secret.store() + assert(my_secret_ref is not None) + + logging.info('Deleting the secret') + my_secret.delete() + + def test_900_restart_on_config_change(self): + """Checking restart happens on config change. + + Change debug mode and assert that change propagates to the correct + file and that services are restarted as a result + """ + self.restart_on_changed_debug_oslo_config_file( + '/etc/barbican/barbican.conf', self._SERVICES) + + def test_910_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + """ + with self.pause_resume(self._SERVICES): + logging.info("Testing pause resume") diff --git a/zaza/openstack/charm_tests/ceilometer/__init__.py b/zaza/openstack/charm_tests/ceilometer/__init__.py new file mode 100644 index 0000000..106535e --- /dev/null +++ b/zaza/openstack/charm_tests/ceilometer/__init__.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing ceilometer.""" diff --git a/zaza/openstack/charm_tests/ceilometer/setup.py b/zaza/openstack/charm_tests/ceilometer/setup.py new file mode 100644 index 0000000..0ff1fb5 --- /dev/null +++ b/zaza/openstack/charm_tests/ceilometer/setup.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Code for configuring Ceilometer.""" + +import logging +import zaza.model as zaza_model +import zaza.openstack.utilities.openstack as openstack_utils + + +def basic_setup(): + """Run setup for testing Ceilometer. + + Setup for testing Ceilometer is currently part of functional + tests. + """ + current_release = openstack_utils.get_os_release() + xenial_ocata = openstack_utils.get_os_release('xenial_ocata') + + if current_release < xenial_ocata: + logging.info( + 'Skipping ceilometer-upgrade as it is not supported before ocata') + return + + logging.debug('Checking ceilometer-upgrade') + + action = zaza_model.run_action_on_leader( + 'ceilometer', + 'ceilometer-upgrade', + raise_on_failure=True) + + return action diff --git a/zaza/openstack/charm_tests/ceilometer/tests.py b/zaza/openstack/charm_tests/ceilometer/tests.py new file mode 100644 index 0000000..c460cdb --- /dev/null +++ b/zaza/openstack/charm_tests/ceilometer/tests.py @@ -0,0 +1,164 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate Ceilometer testing.""" + +import copy +import logging + +import ceilometerclient.v2.client as ceilo_client +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.openstack as openstack_utils + + +class CeilometerTest(test_utils.OpenStackBaseTest): + """Encapsulate Ceilometer tests.""" + + CONF_FILE = '/etc/ceilometer/ceilometer.conf' + + XENIAL_PIKE = openstack_utils.get_os_release('xenial_pike') + XENIAL_OCATA = openstack_utils.get_os_release('xenial_ocata') + XENIAL_NEWTON = openstack_utils.get_os_release('xenial_newton') + XENIAL_MITAKA = openstack_utils.get_os_release('xenial_mitaka') + TRUSTY_MITAKA = openstack_utils.get_os_release('trusty_mitaka') + + @classmethod + def setUpClass(cls): + """Run class setup for running Ceilometer tests.""" + super(CeilometerTest, cls).setUpClass() + cls.current_release = openstack_utils.get_os_release() + + @property + def services(self): + """Return a list of services for the selected OpenStack release.""" + services = [] + + if self.application_name == 'ceilometer-agent': + if self.current_release <= CeilometerTest.XENIAL_MITAKA: + services.append('ceilometer-polling') + else: + services.append('ceilometer-polling: AgentManager worker(0)') + return services + + # Note: disabling ceilometer-polling and ceilometer-agent-central due + # to bug 1846390: https://bugs.launchpad.net/bugs/1846390 + if self.current_release >= CeilometerTest.XENIAL_PIKE: + # services.append('ceilometer-polling: AgentManager worker(0)') + services.append('ceilometer-agent-notification: ' + 'NotificationService worker(0)') + elif self.current_release >= CeilometerTest.XENIAL_OCATA: + services.append('ceilometer-collector: CollectorService worker(0)') + # services.append('ceilometer-polling: AgentManager worker(0)') + services.append('ceilometer-agent-notification: ' + 'NotificationService worker(0)') + services.append('apache2') + elif self.current_release >= CeilometerTest.XENIAL_NEWTON: + services.append('ceilometer-collector - CollectorService(0)') + # services.append('ceilometer-polling - AgentManager(0)') + services.append('ceilometer-agent-notification - ' + 'NotificationService(0)') + services.append('ceilometer-api') + else: + services.append('ceilometer-collector') + services.append('ceilometer-api') + services.append('ceilometer-agent-notification') + + if self.current_release < CeilometerTest.TRUSTY_MITAKA: + services.append('ceilometer-alarm-notifier') + services.append('ceilometer-alarm-evaluator') + + return services + + @property + def restartable_services(self): + """Return a list of services that are known to be restartable. + + For the selected OpenStack release these services are known to be able + to be stopped and started with no issues. + """ + # Due to Bug #1861321 ceilometer-collector does not reliably + # restart. + _services = copy.deepcopy(self.services) + if self.current_release <= CeilometerTest.TRUSTY_MITAKA: + try: + _services.remove('ceilometer-collector') + except ValueError: + pass + return _services + + def test_400_api_connection(self): + """Simple api calls to check service is up and responding.""" + if self.current_release >= CeilometerTest.XENIAL_OCATA: + logging.info('Skipping API checks as ceilometer api has been ' + 'removed') + return + + logging.info('Instantiating ceilometer client...') + ceil = ceilo_client.Client( + session=openstack_utils.get_overcloud_keystone_session() + ) + + logging.info('Checking api functionality...') + assert(ceil.samples.list() == []) + assert(ceil.meters.list() == []) + + def test_900_restart_on_config_change(self): + """Checking restart happens on config change.""" + config_name = 'debug' + + if self.application_name == 'ceilometer-agent': + config_name = 'use-internal-endpoints' + + # Expected default and alternate values + current_value = openstack_utils.get_application_config_option( + self.application_name, config_name + ) + assert type(current_value) == bool + new_value = not current_value + + # Convert bool to str + current_value = str(current_value) + new_value = str(new_value) + + set_default = {config_name: current_value} + set_alternate = {config_name: new_value} + + default_entry = {'DEFAULT': {'debug': [current_value]}} + alternate_entry = {'DEFAULT': {'debug': [new_value]}} + + if self.application_name == 'ceilometer-agent': + default_entry = None + alternate_entry = { + 'service_credentials': {'interface': ['internalURL']} + } + + logging.info('changing config: {}'.format(set_alternate)) + self.restart_on_changed( + CeilometerTest.CONF_FILE, + set_default, + set_alternate, + default_entry, + alternate_entry, + self.restartable_services) + + def test_901_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started. + """ + with self.pause_resume(self.restartable_services): + logging.info("Testing pause and resume") diff --git a/zaza/openstack/charm_tests/ceph/benchmarking/__init__.py b/zaza/openstack/charm_tests/ceph/benchmarking/__init__.py new file mode 100644 index 0000000..74fd9bd --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/benchmarking/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for benchmarking ceph.""" diff --git a/zaza/openstack/charm_tests/ceph/benchmarking/tests.py b/zaza/openstack/charm_tests/ceph/benchmarking/tests.py new file mode 100644 index 0000000..9bbf60e --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/benchmarking/tests.py @@ -0,0 +1,124 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Ceph Benchmark Tests.""" + +import logging +import re +import unittest + +import zaza.model + + +class BenchmarkTests(unittest.TestCase): + """Ceph Bencharmk Tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running ceph benchmark tests.""" + super().setUpClass() + cls.results_match = "^[A-Z].*" + cls.pool = "zaza_benchmarks" + cls.test_results = {} + cls.time_in_secs = 30 + + def parse_bench_results(self, results_string): + """Parse bench results from string. + + :param results string: Output from rados bench command. + With newlines due to juju run's output. + :type results_string: string + :returns: Dictionary of results summary + :rtype: dict + """ + _results = {} + _lines = results_string.split("\n") + for _line in _lines: + _line = _line.strip() + if re.match(self.results_match, _line): + _keyvalues = _line.split(":") + try: + _results[_keyvalues[0].strip()] = _keyvalues[1].strip() + except IndexError: + # Skipping detailed output for summary details + pass + return _results + + def run_rados_bench(self, action, params=None): + """Run rados bench. + + :param action: String rados bench command i.e. write, rand, seq + :type action: string + :param params: List of string extra parameters to rados bench command + :type params: List[strings] + :returns: Unit run dict result + :rtype: dict + """ + _cmd = "rados bench -p {} {} {}".format( + self.pool, self.time_in_secs, action) + if params: + _cmd += " " + _cmd += " ".join(params) + logging.info( + "Running '{}' for {} seconds ...".format(_cmd, self.time_in_secs)) + _result = zaza.model.run_on_leader( + "ceph-mon", _cmd, timeout=self.time_in_secs + 60) + return _result + + def test_001_create_pool(self): + """Create ceph pool.""" + _cmd = "ceph osd pool create {} 100 100".format(self.pool) + _result = zaza.model.run_on_leader( + "ceph-mon", _cmd) + if _result.get("Code") and not _result.get("Code").startswith('0'): + if "already exists" in _result.get("Stderr", ""): + logging.warning( + "Ceph osd pool {} already exits.".format(self.pool)) + else: + logging.error("Ceph osd pool create failed") + raise Exception(_result.get("Stderr", "")) + + def test_100_rados_bench_write(self): + """Rados bench write test.""" + _result = self.run_rados_bench("write", params=["--no-cleanup"]) + self.test_results["write"] = ( + self.parse_bench_results(_result.get("Stdout", ""))) + + def test_200_rados_bench_read_seq(self): + """Rados bench read sequential test.""" + _result = self.run_rados_bench("seq") + self.test_results["read_seq"] = ( + self.parse_bench_results(_result.get("Stdout", ""))) + + def test_300_rados_bench_read_rand(self): + """Rados bench read random test.""" + _result = self.run_rados_bench("rand") + self.test_results["read_rand"] = ( + self.parse_bench_results(_result.get("Stdout", ""))) + + def test_998_rados_cleanup(self): + """Cleanup rados bench data.""" + _cmd = "rados -p {} cleanup".format(self.pool) + _result = zaza.model.run_on_leader("ceph-mon", _cmd) + if _result.get("Code") and not _result.get("Code").startswith('0'): + logging.warning("rados cleanup failed") + + def test_999_print_rados_bench_results(self): + """Print rados bench results.""" + print("######## Begin Ceph Results ########") + for test, results in self.test_results.items(): + print("##### {} ######".format(test)) + for key, value in results.items(): + print("{}: {}".format(key, value)) + print("######## End Ceph Results ########") diff --git a/zaza/openstack/charm_tests/ceph/dashboard/__init__.py b/zaza/openstack/charm_tests/ceph/dashboard/__init__.py new file mode 100644 index 0000000..f34c394 --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/dashboard/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing ``ceph-dashboard``.""" diff --git a/zaza/openstack/charm_tests/ceph/dashboard/tests.py b/zaza/openstack/charm_tests/ceph/dashboard/tests.py new file mode 100644 index 0000000..e7c8863 --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/dashboard/tests.py @@ -0,0 +1,97 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulating `ceph-dashboard` testing.""" + +import collections +import os +import requests + +import zaza +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.utilities.deployment_env as deployment_env + + +class CephDashboardTest(test_utils.BaseCharmTest): + """Class for `ceph-dashboard` tests.""" + + REMOTE_CERT_FILE = ('/usr/local/share/ca-certificates/' + 'vault_ca_cert_dashboard.crt') + + @classmethod + def setUpClass(cls): + """Run class setup for running ceph dashboard tests.""" + super().setUpClass() + cls.application_name = 'ceph-dashboard' + cls.local_ca_cert = cls.collect_ca() + + @classmethod + def collect_ca(cls): + """Collect CA from ceph-dashboard unit.""" + local_ca_cert = os.path.join( + deployment_env.get_tmpdir(), + os.path.basename(cls.REMOTE_CERT_FILE)) + if not os.path.isfile(local_ca_cert): + units = zaza.model.get_units(cls.application_name) + zaza.model.scp_from_unit( + units[0].entity_id, + cls.REMOTE_CERT_FILE, + local_ca_cert) + return local_ca_cert + + def test_dashboard_units(self): + """Check dashboard units are configured correctly.""" + # XXX: Switch to using CA for verification when + # https://bugs.launchpad.net/cloud-archive/+bug/1933410 + # is fix released. + # verify = self.local_ca_cert + verify = False + units = zaza.model.get_units(self.application_name) + rcs = collections.defaultdict(list) + for unit in units: + r = requests.get( + 'https://{}:8443'.format(unit.public_address), + verify=verify, + allow_redirects=False) + rcs[r.status_code].append(unit.public_address) + self.assertEqual(len(rcs[requests.codes.ok]), 1) + self.assertEqual(len(rcs[requests.codes.see_other]), len(units) - 1) + + def create_user(self, username, role='administrator'): + """Create a dashboard user. + + :param username: Username to create. + :type username: str + :param role: Role to grant to user. + :type role: str + :returns: Results from action. + :rtype: juju.action.Action + """ + action = zaza.model.run_action_on_leader( + 'ceph-dashboard', + 'add-user', + action_params={ + 'username': username, + 'role': role}) + return action + + def test_create_user(self): + """Test create user action.""" + test_user = 'marvin' + action = self.create_user(test_user) + self.assertEqual(action.status, "completed") + self.assertTrue(action.data['results']['password']) + action = self.create_user(test_user) + # Action should fail as the user already exists + self.assertEqual(action.status, "failed") diff --git a/zaza/openstack/charm_tests/ceph/fs/__init__.py b/zaza/openstack/charm_tests/ceph/fs/__init__.py new file mode 100644 index 0000000..b9ae308 --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/fs/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing ceph-fs.""" diff --git a/zaza/openstack/charm_tests/ceph/fs/tests.py b/zaza/openstack/charm_tests/ceph/fs/tests.py new file mode 100644 index 0000000..28ac259 --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/fs/tests.py @@ -0,0 +1,124 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate CephFS testing.""" + +import logging +from tenacity import Retrying, stop_after_attempt, wait_exponential + +import zaza.model as model +import zaza.openstack.charm_tests.neutron.tests as neutron_tests +import zaza.openstack.charm_tests.nova.utils as nova_utils +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.configure.guest as guest +import zaza.openstack.utilities.openstack as openstack_utils + + +class CephFSTests(test_utils.OpenStackBaseTest): + """Encapsulate CephFS tests.""" + + RESOURCE_PREFIX = 'zaza-cephfstests' + INSTANCE_USERDATA = """#cloud-config +packages: +- ceph-fuse +- python +mounts: + - [ 'none', '/mnt/cephfs', 'fuse.ceph', 'ceph.id=admin,ceph.conf=/etc/ceph/ceph.conf,_netdev,defaults', '0', '0' ] +write_files: +- content: | +{} + path: /etc/ceph/ceph.conf +- content: | +{} + path: /etc/ceph/ceph.client.admin.keyring +""" # noqa + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(CephFSTests, cls).setUpClass() + + def test_cephfs_share(self): + """Test that CephFS shares can be accessed on two instances. + + 1. Spawn two servers + 2. mount it on both + 3. write a file on one + 4. read it on the other + 5. profit + """ + keyring = model.run_on_leader( + 'ceph-mon', 'cat /etc/ceph/ceph.client.admin.keyring')['Stdout'] + conf = model.run_on_leader( + 'ceph-mon', 'cat /etc/ceph/ceph.conf')['Stdout'] + # Spawn Servers + instance_1, instance_2 = self.launch_guests( + userdata=self.INSTANCE_USERDATA.format( + _indent(conf, 8), + _indent(keyring, 8))) + + # Write a file on instance_1 + def verify_setup(stdin, stdout, stderr): + status = stdout.channel.recv_exit_status() + self.assertEqual(status, 0) + + fip_1 = neutron_tests.floating_ips_from_instance(instance_1)[0] + fip_2 = neutron_tests.floating_ips_from_instance(instance_2)[0] + username = guest.boot_tests['bionic']['username'] + password = guest.boot_tests['bionic'].get('password') + privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME) + + for attempt in Retrying( + stop=stop_after_attempt(3), + wait=wait_exponential(multiplier=1, min=2, max=10)): + with attempt: + openstack_utils.ssh_command( + username, fip_1, 'instance-1', + 'sudo mount -a && ' + 'echo "test" | sudo tee /mnt/cephfs/test', + password=password, privkey=privkey, verify=verify_setup) + + def verify(stdin, stdout, stderr): + status = stdout.channel.recv_exit_status() + self.assertEqual(status, 0) + out = "" + for line in iter(stdout.readline, ""): + out += line + self.assertEqual(out, "test\n") + + openstack_utils.ssh_command( + username, fip_2, 'instance-2', + 'sudo mount -a && ' + 'sudo cat /mnt/cephfs/test', + password=password, privkey=privkey, verify=verify) + + +def _indent(text, amount, ch=' '): + padding = amount * ch + return ''.join(padding+line for line in text.splitlines(True)) + + +class CharmOperationTest(test_utils.BaseCharmTest): + """CephFS Charm operation tests.""" + + def test_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped, then resume and check + they are started. + """ + services = ['ceph-mds'] + with self.pause_resume(services): + logging.info('Testing pause resume (services="{}")' + .format(services)) diff --git a/zaza/openstack/charm_tests/ceph/iscsi/__init__.py b/zaza/openstack/charm_tests/ceph/iscsi/__init__.py new file mode 100644 index 0000000..55b06b5 --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/iscsi/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing ``ceph-iscsi``.""" diff --git a/zaza/openstack/charm_tests/ceph/iscsi/setup.py b/zaza/openstack/charm_tests/ceph/iscsi/setup.py new file mode 100644 index 0000000..abd6a73 --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/iscsi/setup.py @@ -0,0 +1,30 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Setup for ceph iscsi gateway deployments.""" + +import zaza.model + + +def basic_guest_setup(): + """Run basic setup for iscsi guest.""" + for unit in zaza.model.get_units('ubuntu'): + setup_cmds = [ + "apt install --yes open-iscsi multipath-tools", + "systemctl start iscsi", + "systemctl start iscsid"] + for cmd in setup_cmds: + zaza.model.run_on_unit( + unit.entity_id, + cmd) diff --git a/zaza/openstack/charm_tests/ceph/iscsi/tests.py b/zaza/openstack/charm_tests/ceph/iscsi/tests.py new file mode 100644 index 0000000..0766e4a --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/iscsi/tests.py @@ -0,0 +1,311 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulating `ceph-iscsi` testing.""" + +import logging +import tempfile + +import zaza +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.generic as generic_utils + + +class CephISCSIGatewayTest(test_utils.BaseCharmTest): + """Class for `ceph-iscsi` tests.""" + + GW_IQN = "iqn.2003-03.com.canonical.iscsi-gw:iscsi-igw" + DATA_POOL_NAME = 'zaza_rep_pool' + EC_PROFILE_NAME = 'zaza_iscsi' + EC_DATA_POOL = 'zaza_ec_data_pool' + EC_METADATA_POOL = 'zaza_ec_metadata_pool' + + def get_client_initiatorname(self, unit): + """Return the initiatorname for the given unit. + + :param unit_name: Name of unit to match + :type unit: str + :returns: Initiator name + :rtype: str + """ + generic_utils.assertRemoteRunOK(zaza.model.run_on_unit( + unit, + ('cp /etc/iscsi/initiatorname.iscsi /tmp; ' + 'chmod 644 /tmp/initiatorname.iscsi'))) + with tempfile.TemporaryDirectory() as tmpdirname: + tmp_file = '{}/{}'.format(tmpdirname, 'initiatorname.iscsi') + zaza.model.scp_from_unit( + unit, + '/tmp/initiatorname.iscsi', + tmp_file) + with open(tmp_file, 'r') as stream: + contents = stream.readlines() + initiatorname = None + for line in contents: + if line.startswith('InitiatorName'): + initiatorname = line.split('=')[1].rstrip() + return initiatorname + + def get_base_ctxt(self): + """Generate a context for running gwcli commands to create a target. + + :returns: Base gateway context + :rtype: Dict + """ + gw_units = zaza.model.get_units('ceph-iscsi') + host_names = generic_utils.get_unit_hostnames(gw_units, fqdn=True) + client_entity_ids = [ + u.entity_id for u in zaza.model.get_units('ubuntu')] + ctxt = { + 'client_entity_ids': sorted(client_entity_ids), + 'gw_iqn': self.GW_IQN, + 'chap_creds': 'username={chap_username} password={chap_password}', + 'gwcli_gw_dir': '/iscsi-targets/{gw_iqn}/gateways', + 'gwcli_hosts_dir': '/iscsi-targets/{gw_iqn}/hosts', + 'gwcli_disk_dir': '/disks', + 'gwcli_client_dir': '{gwcli_hosts_dir}/{client_initiatorname}', + } + ctxt['gateway_units'] = [ + { + 'entity_id': u.entity_id, + 'ip': u.public_address, + 'hostname': host_names[u.entity_id]} + for u in zaza.model.get_units('ceph-iscsi')] + ctxt['gw_ip'] = sorted([g['ip'] for g in ctxt['gateway_units']])[0] + return ctxt + + def run_commands(self, unit_name, commands, ctxt): + """Run commands on unit. + + Iterate over each command and apply the context to the command, then + run the command on the supplied unit. + + :param unit_name: Name of unit to match + :type unit: str + :param commands: List of commands to run. + :type commands: List[str] + :param ctxt: Context to apply to each command. + :type ctxt: Dict + :raises: AssertionError + """ + for _cmd in commands: + cmd = _cmd.format(**ctxt) + generic_utils.assertRemoteRunOK(zaza.model.run_on_unit( + unit_name, + cmd)) + + def create_iscsi_target(self, ctxt): + """Create target on gateway. + + :param ctxt: Base gateway context + :type ctxt: Dict + """ + generic_utils.assertActionRanOK(zaza.model.run_action_on_leader( + 'ceph-iscsi', + 'create-target', + action_params={ + 'gateway-units': ' '.join([g['entity_id'] + for g in ctxt['gateway_units']]), + 'iqn': self.GW_IQN, + 'rbd-pool-name': ctxt.get('pool_name', ''), + 'ec-rbd-metadata-pool': ctxt.get('ec_meta_pool_name', ''), + 'image-size': ctxt['img_size'], + 'image-name': ctxt['img_name'], + 'client-initiatorname': ctxt['client_initiatorname'], + 'client-username': ctxt['chap_username'], + 'client-password': ctxt['chap_password'] + })) + + def login_iscsi_target(self, ctxt): + """Login to the iscsi target on client. + + :param ctxt: Base gateway context + :type ctxt: Dict + """ + logging.info("Logging in to iscsi target") + base_op_cmd = ('iscsiadm --mode node --targetname {gw_iqn} ' + '--op=update ').format(**ctxt) + setup_cmds = [ + 'iscsiadm -m discovery -t st -p {gw_ip}', + base_op_cmd + '-n node.session.auth.authmethod -v CHAP', + base_op_cmd + '-n node.session.auth.username -v {chap_username}', + base_op_cmd + '-n node.session.auth.password -v {chap_password}', + 'iscsiadm --mode node --targetname {gw_iqn} --login'] + self.run_commands(ctxt['client_entity_id'], setup_cmds, ctxt) + + def logout_iscsi_targets(self, ctxt): + """Logout of iscsi target on client. + + :param ctxt: Base gateway context + :type ctxt: Dict + """ + logging.info("Logging out of iscsi target") + logout_cmds = [ + 'iscsiadm --mode node --logoutall=all'] + self.run_commands(ctxt['client_entity_id'], logout_cmds, ctxt) + + def check_client_device(self, ctxt, init_client=True): + """Wait for multipath device to appear on client and test access. + + :param ctxt: Base gateway context + :type ctxt: Dict + :param init_client: Initialise client if this is the first time it has + been used. + :type init_client: bool + """ + logging.info("Checking multipath device is present.") + device_ctxt = { + 'bdevice': '/dev/dm-0', + 'mount_point': '/mnt/iscsi', + 'test_file': '/mnt/iscsi/test.data'} + ls_bdevice_cmd = 'ls -l {bdevice}' + mkfs_cmd = 'mke2fs {bdevice}' + mkdir_cmd = 'mkdir {mount_point}' + mount_cmd = 'mount {bdevice} {mount_point}' + umount_cmd = 'umount {mount_point}' + check_mounted_cmd = 'mountpoint {mount_point}' + write_cmd = 'truncate -s 1M {test_file}' + check_file = 'ls -l {test_file}' + if init_client: + commands = [ + mkfs_cmd, + mkdir_cmd, + mount_cmd, + check_mounted_cmd, + write_cmd, + check_file, + umount_cmd] + else: + commands = [ + mount_cmd, + check_mounted_cmd, + check_file, + umount_cmd] + + async def check_device_present(): + run = await zaza.model.async_run_on_unit( + ctxt['client_entity_id'], + ls_bdevice_cmd.format(bdevice=device_ctxt['bdevice'])) + return device_ctxt['bdevice'] in run['stdout'] + + logging.info("Checking {} is present on {}".format( + device_ctxt['bdevice'], + ctxt['client_entity_id'])) + zaza.model.block_until(check_device_present) + logging.info("Checking mounting device and access") + self.run_commands(ctxt['client_entity_id'], commands, device_ctxt) + + def create_data_pool(self): + """Create data pool to back iscsi targets.""" + generic_utils.assertActionRanOK(zaza.model.run_action_on_leader( + 'ceph-mon', + 'create-pool', + action_params={ + 'name': self.DATA_POOL_NAME})) + + def create_ec_data_pool(self): + """Create data pool to back iscsi targets.""" + generic_utils.assertActionRanOK(zaza.model.run_action_on_leader( + 'ceph-mon', + 'create-erasure-profile', + action_params={ + 'name': self.EC_PROFILE_NAME, + 'coding-chunks': 2, + 'data-chunks': 4, + 'plugin': 'jerasure'})) + generic_utils.assertActionRanOK(zaza.model.run_action_on_leader( + 'ceph-mon', + 'create-pool', + action_params={ + 'name': self.EC_DATA_POOL, + 'pool-type': 'erasure-coded', + 'allow-ec-overwrites': True, + 'erasure-profile-name': self.EC_PROFILE_NAME})) + generic_utils.assertActionRanOK(zaza.model.run_action_on_leader( + 'ceph-mon', + 'create-pool', + action_params={ + 'name': self.EC_METADATA_POOL})) + + def run_client_checks(self, test_ctxt): + """Check access to mulipath device. + + Write a filesystem to device, mount it and write data. Then unmount + and logout the iscsi target, finally reconnect and remount checking + data is still present. + + :param test_ctxt: Test context. + :type test_ctxt: Dict + """ + self.create_iscsi_target(test_ctxt) + self.login_iscsi_target(test_ctxt) + self.check_client_device(test_ctxt, init_client=True) + self.logout_iscsi_targets(test_ctxt) + self.login_iscsi_target(test_ctxt) + self.check_client_device(test_ctxt, init_client=False) + + def test_create_and_mount_volume(self): + """Test creating a target and mounting it on a client.""" + self.create_data_pool() + ctxt = self.get_base_ctxt() + client_entity_id = ctxt['client_entity_ids'][0] + ctxt.update({ + 'client_entity_id': client_entity_id, + 'client_initiatorname': self.get_client_initiatorname( + client_entity_id), + 'pool_name': self.DATA_POOL_NAME, + 'chap_username': 'myiscsiusername1', + 'chap_password': 'myiscsipassword1', + 'img_size': '1G', + 'img_name': 'disk_rep_1'}) + self.run_client_checks(ctxt) + + def test_create_and_mount_ec_backed_volume(self): + """Test creating an EC backed target and mounting it on a client.""" + self.create_ec_data_pool() + ctxt = self.get_base_ctxt() + client_entity_id = ctxt['client_entity_ids'][1] + ctxt.update({ + 'client_entity_id': client_entity_id, + 'client_initiatorname': self.get_client_initiatorname( + client_entity_id), + 'pool_name': self.EC_DATA_POOL, + 'ec_meta_pool_name': self.EC_METADATA_POOL, + 'chap_username': 'myiscsiusername2', + 'chap_password': 'myiscsipassword2', + 'img_size': '2G', + 'img_name': 'disk_ec_1'}) + self.run_client_checks(ctxt) + + def test_create_and_mount_volume_default_pool(self): + """Test creating a target and mounting it on a client.""" + self.create_data_pool() + ctxt = self.get_base_ctxt() + client_entity_id = ctxt['client_entity_ids'][2] + ctxt.update({ + 'client_entity_id': client_entity_id, + 'client_initiatorname': self.get_client_initiatorname( + client_entity_id), + 'chap_username': 'myiscsiusername3', + 'chap_password': 'myiscsipassword3', + 'img_size': '3G', + 'img_name': 'disk_default_1'}) + self.run_client_checks(ctxt) + + def test_pause_resume(self): + """Test pausing and resuming a unit.""" + with self.pause_resume( + ['rbd-target-api', 'rbd-target-gw'], + pgrep_full=True): + logging.info("Testing pause resume") diff --git a/zaza/openstack/charm_tests/ceph/mon/__init__.py b/zaza/openstack/charm_tests/ceph/mon/__init__.py new file mode 100644 index 0000000..867c3af --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/mon/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing ceph-mon for cinder-ceph.""" diff --git a/zaza/openstack/charm_tests/ceph/mon/tests.py b/zaza/openstack/charm_tests/ceph/mon/tests.py new file mode 100644 index 0000000..4258a36 --- /dev/null +++ b/zaza/openstack/charm_tests/ceph/mon/tests.py @@ -0,0 +1,200 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Ceph-mon Testing for cinder-ceph.""" + +import logging + +import zaza.model + +from zaza.openstack.utilities import ( + generic as generic_utils, + openstack as openstack_utils, + exceptions as zaza_exceptions +) +import zaza.openstack.charm_tests.test_utils as test_utils + + +class CinderCephMonTest(test_utils.OpenStackBaseTest): + """Verify that the ceph mon units are healthy.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running ceph mon tests with cinder.""" + super().setUpClass() + + # ported from the cinder-ceph Amulet test + def test_499_ceph_cmds_exit_zero(self): + """Verify expected state with security-checklist.""" + logging.info("Checking exit values are 0 on ceph commands.") + + units = zaza.model.get_units("ceph-mon", model_name=self.model_name) + current_release = openstack_utils.get_os_release() + bionic_train = openstack_utils.get_os_release('bionic_train') + if current_release < bionic_train: + units.extend(zaza.model.get_units("cinder-ceph", + model_name=self.model_name)) + + commands = [ + 'sudo ceph health', + 'sudo ceph mds stat', + 'sudo ceph pg stat', + 'sudo ceph osd stat', + 'sudo ceph mon stat', + ] + + for unit in units: + run_commands(unit.name, commands) + + # ported from the cinder-ceph Amulet test + def test_500_ceph_alternatives_cleanup(self): + """Check ceph alternatives removed when ceph-mon relation is broken.""" + # Skip this test if release is less than xenial_ocata as in that case + # cinder HAS a relation with ceph directly and this test would fail + current_release = openstack_utils.get_os_release() + xenial_ocata = openstack_utils.get_os_release('xenial_ocata') + if current_release < xenial_ocata: + logging.info("Skipping test as release < xenial-ocata") + return + + units = zaza.model.get_units("cinder-ceph", + model_name=self.model_name) + + # check each unit prior to breaking relation + for unit in units: + dir_list = directory_listing(unit.name, "/etc/ceph") + if 'ceph.conf' in dir_list: + logging.debug( + "/etc/ceph/ceph.conf exists BEFORE relation-broken") + else: + raise zaza_exceptions.CephGenericError( + "unit: {} - /etc/ceph/ceph.conf does not exist " + "BEFORE relation-broken".format(unit.name)) + + # remove the relation so that /etc/ceph/ceph.conf is removed + logging.info("Removing ceph-mon:client <-> cinder-ceph:ceph relation") + zaza.model.remove_relation( + "ceph-mon", "ceph-mon:client", "cinder-ceph:ceph") + # zaza.model.wait_for_agent_status() + logging.info("Wait till relation is removed...") + ceph_mon_units = zaza.model.get_units("ceph-mon", + model_name=self.model_name) + conditions = [ + invert_condition( + does_relation_exist( + u.name, "ceph-mon", "cinder-ceph", "ceph", + self.model_name)) + for u in ceph_mon_units] + zaza.model.block_until(*conditions) + + logging.info("Checking each unit after breaking relation...") + for unit in units: + dir_list = directory_listing(unit.name, "/etc/ceph") + if 'ceph.conf' not in dir_list: + logging.debug( + "/etc/ceph/ceph.conf removed AFTER relation-broken") + else: + raise zaza_exceptions.CephGenericError( + "unit: {} - /etc/ceph/ceph.conf still exists " + "AFTER relation-broken".format(unit.name)) + + # Restore cinder-ceph and ceph-mon relation to keep tests idempotent + logging.info("Restoring ceph-mon:client <-> cinder-ceph:ceph relation") + zaza.model.add_relation( + "ceph-mon", "ceph-mon:client", "cinder-ceph:ceph") + conditions = [ + does_relation_exist( + u.name, "ceph-mon", "cinder-ceph", "ceph", self.model_name) + for u in ceph_mon_units] + logging.info("Wait till model is idle ...") + zaza.model.block_until(*conditions) + zaza.model.block_until_all_units_idle() + logging.info("... Done.") + + +def does_relation_exist(unit_name, + application_name, + remote_application_name, + remote_interface_name, + model_name): + """For use in async blocking function, return True if it exists. + + :param unit_name: the unit (by name) that to check on. + :type unit_name: str + :param application_name: Name of application on this side of relation + :type application_name: str + :param remote_application_name: the relation name at that unit to check for + :type relation_application_name: str + :param remote_interface_name: the interface name at that unit to check for + :type relation_interface_name: str + :param model_name: the model to check on + :type model_name: str + :returns: Corouting that returns True if the relation was found + :rtype: Coroutine[[], boolean] + """ + async def _async_does_relation_exist_closure(): + async with zaza.model.run_in_model(model_name) as model: + spec = "{}:{}".format( + remote_application_name, remote_interface_name) + for rel in model.applications[application_name].relations: + if rel.matches(spec): + return True + return False + return _async_does_relation_exist_closure + + +def invert_condition(async_condition): + """Invert the condition provided so it can be provided to the blocking fn. + + :param async_condition: the async callable that is the test + :type async_condition: Callable[] + :returns: Corouting that returns not of the result of a the callable + :rtype: Coroutine[[], bool] + """ + async def _async_invert_condition_closure(): + return not(await async_condition()) + return _async_invert_condition_closure + + +def run_commands(unit_name, commands): + """Run commands on unit. + + Apply context to commands until all variables have been replaced, then + run the command on the given unit. + """ + errors = [] + for cmd in commands: + try: + generic_utils.assertRemoteRunOK(zaza.model.run_on_unit( + unit_name, + cmd)) + except Exception as e: + errors.append("unit: {}, command: {}, error: {}" + .format(unit_name, cmd, str(e))) + if errors: + raise zaza_exceptions.CephGenericError("\n".join(errors)) + + +def directory_listing(unit_name, directory): + """Return a list of files/directories from a directory on a unit. + + :param unit_name: the unit to fetch the directory listing from + :type unit_name: str + :param directory: the directory to fetch the listing from + :type directory: str + :returns: A listing using "ls -1" on the unit + :rtype: List[str] + """ + result = zaza.model.run_on_unit(unit_name, "ls -1 {}".format(directory)) + return result['Stdout'].splitlines() diff --git a/zaza/openstack/charm_tests/ceph/osd/tests.py b/zaza/openstack/charm_tests/ceph/osd/tests.py index e3f7be1..e598a5d 100644 --- a/zaza/openstack/charm_tests/ceph/osd/tests.py +++ b/zaza/openstack/charm_tests/ceph/osd/tests.py @@ -16,6 +16,9 @@ import logging import unittest +import re + +from copy import deepcopy import zaza.openstack.charm_tests.test_utils as test_utils import zaza.model as zaza_model @@ -47,3 +50,235 @@ class SecurityTest(unittest.TestCase): expected_passes, expected_failures, expected_to_pass=True) + + +class OsdService: + """Simple representation of ceph-osd systemd service.""" + + def __init__(self, id_): + """ + Init service using its ID. + + e.g.: id_=1 -> ceph-osd@1 + """ + self.id = id_ + self.name = 'ceph-osd@{}'.format(id_) + + +async def async_wait_for_service_status(unit_name, services, target_status, + model_name=None, timeout=2700): + """Wait for all services on the unit to be in the desired state. + + Note: This function emulates the + `zaza.model.async_block_until_service_status` function, but it's using + `systemctl is-active` command instead of `pidof/pgrep` of the original + function. + + :param unit_name: Name of unit to run action on + :type unit_name: str + :param services: List of services to check + :type services: List[str] + :param target_status: State services must be in (stopped or running) + :type target_status: str + :param model_name: Name of model to query. + :type model_name: str + :param timeout: Time to wait for status to be achieved + :type timeout: int + """ + async def _check_service(): + services_ok = True + for service in services: + command = r"systemctl is-active '{}'".format(service) + out = await zaza_model.async_run_on_unit( + unit_name, + command, + model_name=model_name, + timeout=timeout) + response = out['Stdout'].strip() + + if target_status == "running" and response == 'active': + continue + elif target_status == "stopped" and response == 'inactive': + continue + else: + services_ok = False + break + + return services_ok + + accepted_states = ('stopped', 'running') + if target_status not in accepted_states: + raise RuntimeError('Invalid target state "{}". Accepted states: ' + '{}'.format(target_status, accepted_states)) + + async with zaza_model.run_in_model(model_name): + await zaza_model.async_block_until(_check_service, timeout=timeout) + + +wait_for_service = zaza_model.sync_wrapper(async_wait_for_service_status) + + +class ServiceTest(unittest.TestCase): + """ceph-osd systemd service tests.""" + + TESTED_UNIT = 'ceph-osd/0' # This can be any ceph-osd unit in the model + SERVICE_PATTERN = re.compile(r'ceph-osd@(?P\d+)\.service') + + def __init__(self, methodName='runTest'): + """Initialize Test Case.""" + super(ServiceTest, self).__init__(methodName) + self._available_services = None + + @classmethod + def setUpClass(cls): + """Run class setup for running ceph service tests.""" + super(ServiceTest, cls).setUpClass() + + def setUp(self): + """Run test setup.""" + # Skip 'service' action tests on systems without systemd + result = zaza_model.run_on_unit(self.TESTED_UNIT, 'which systemctl') + if not result['Stdout']: + raise unittest.SkipTest("'service' action is not supported on " + "systems without 'systemd'. Skipping " + "tests.") + # Note(mkalcok): This counter reset is needed because ceph-osd service + # is limited to 3 restarts per 30 mins which is insufficient + # when running functional tests for 'service' action. This + # limitation is defined in /lib/systemd/system/ceph-osd@.service + # in section [Service] with options 'StartLimitInterval' and + # 'StartLimitBurst' + reset_counter = 'systemctl reset-failed' + zaza_model.run_on_unit(self.TESTED_UNIT, reset_counter) + + def tearDown(self): + """Start ceph-osd services after each test. + + This ensures that the environment is ready for the next tests. + """ + zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start', + action_params={'osds': 'all'}, + raise_on_failure=True) + + @property + def available_services(self): + """Return list of all ceph-osd services present on the TESTED_UNIT.""" + if self._available_services is None: + self._available_services = self._fetch_osd_services() + return self._available_services + + def _fetch_osd_services(self): + """Fetch all ceph-osd services present on the TESTED_UNIT.""" + service_list = [] + service_list_cmd = 'systemctl list-units --full --all ' \ + '--no-pager -t service' + result = zaza_model.run_on_unit(self.TESTED_UNIT, service_list_cmd) + for line in result['Stdout'].split('\n'): + service_name = self.SERVICE_PATTERN.search(line) + if service_name: + service_id = int(service_name.group('service_id')) + service_list.append(OsdService(service_id)) + return service_list + + def test_start_stop_all_by_keyword(self): + """Start and Stop all ceph-osd services using keyword 'all'.""" + service_list = [service.name for service in self.available_services] + + logging.info("Running 'service stop=all' action on {} " + "unit".format(self.TESTED_UNIT)) + zaza_model.run_action_on_units([self.TESTED_UNIT], 'stop', + action_params={'osds': 'all'}) + wait_for_service(unit_name=self.TESTED_UNIT, + services=service_list, + target_status='stopped') + + logging.info("Running 'service start=all' action on {} " + "unit".format(self.TESTED_UNIT)) + zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start', + action_params={'osds': 'all'}) + wait_for_service(unit_name=self.TESTED_UNIT, + services=service_list, + target_status='running') + + def test_start_stop_all_by_list(self): + """Start and Stop all ceph-osd services using explicit list.""" + service_list = [service.name for service in self.available_services] + service_ids = [str(service.id) for service in self.available_services] + action_params = ','.join(service_ids) + + logging.info("Running 'service stop={}' action on {} " + "unit".format(action_params, self.TESTED_UNIT)) + zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'stop', + action_params={'osds': action_params}) + wait_for_service(unit_name=self.TESTED_UNIT, + services=service_list, + target_status='stopped') + + logging.info("Running 'service start={}' action on {} " + "unit".format(action_params, self.TESTED_UNIT)) + zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start', + action_params={'osds': action_params}) + wait_for_service(unit_name=self.TESTED_UNIT, + services=service_list, + target_status='running') + + def test_stop_specific(self): + """Stop only specified ceph-osd service.""" + if len(self.available_services) < 2: + raise unittest.SkipTest('This test can be performed only if ' + 'there\'s more than one ceph-osd service ' + 'present on the tested unit') + + should_run = deepcopy(self.available_services) + to_stop = should_run.pop() + should_run = [service.name for service in should_run] + + logging.info("Running 'service stop={} on {} " + "unit".format(to_stop.id, self.TESTED_UNIT)) + + zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'stop', + action_params={'osds': to_stop.id}) + + wait_for_service(unit_name=self.TESTED_UNIT, + services=[to_stop.name, ], + target_status='stopped') + wait_for_service(unit_name=self.TESTED_UNIT, + services=should_run, + target_status='running') + + def test_start_specific(self): + """Start only specified ceph-osd service.""" + if len(self.available_services) < 2: + raise unittest.SkipTest('This test can be performed only if ' + 'there\'s more than one ceph-osd service ' + 'present on the tested unit') + + service_names = [service.name for service in self.available_services] + should_stop = deepcopy(self.available_services) + to_start = should_stop.pop() + should_stop = [service.name for service in should_stop] + + # Note: can't stop ceph-osd.target as restarting a single OSD will + # cause this to start all of the OSDs when a single one starts. + logging.info("Stopping all running ceph-osd services") + service_stop_cmd = '; '.join(['systemctl stop {}'.format(service) + for service in service_names]) + zaza_model.run_on_unit(self.TESTED_UNIT, service_stop_cmd) + + wait_for_service(unit_name=self.TESTED_UNIT, + services=service_names, + target_status='stopped') + + logging.info("Running 'service start={} on {} " + "unit".format(to_start.id, self.TESTED_UNIT)) + + zaza_model.run_action_on_units([self.TESTED_UNIT, ], 'start', + action_params={'osds': to_start.id}) + + wait_for_service(unit_name=self.TESTED_UNIT, + services=[to_start.name, ], + target_status='running') + + wait_for_service(unit_name=self.TESTED_UNIT, + services=should_stop, + target_status='stopped') diff --git a/zaza/openstack/charm_tests/ceph/rbd_mirror/tests.py b/zaza/openstack/charm_tests/ceph/rbd_mirror/tests.py index fa664c3..d8d7967 100644 --- a/zaza/openstack/charm_tests/ceph/rbd_mirror/tests.py +++ b/zaza/openstack/charm_tests/ceph/rbd_mirror/tests.py @@ -17,13 +17,140 @@ import json import logging import re +import cinderclient.exceptions as cinder_exceptions + import zaza.openstack.charm_tests.test_utils as test_utils import zaza.model import zaza.openstack.utilities.ceph import zaza.openstack.utilities.openstack as openstack -from zaza.openstack.charm_tests.glance.setup import LTS_IMAGE_NAME +from zaza.openstack.charm_tests.glance.setup import ( + LTS_IMAGE_NAME, + CIRROS_IMAGE_NAME) + + +DEFAULT_CINDER_RBD_MIRRORING_MODE = 'pool' + + +def get_cinder_rbd_mirroring_mode(cinder_ceph_app_name='cinder-ceph'): + """Get the RBD mirroring mode for the Cinder Ceph pool. + + :param cinder_ceph_app_name: Cinder Ceph Juju application name. + :type cinder_ceph_app_name: str + :returns: A string representing the RBD mirroring mode. It can be + either 'pool' or 'image'. + :rtype: str + """ + rbd_mirroring_mode_config = zaza.model.get_application_config( + cinder_ceph_app_name).get('rbd-mirroring-mode') + if rbd_mirroring_mode_config: + rbd_mirroring_mode = rbd_mirroring_mode_config.get( + 'value', DEFAULT_CINDER_RBD_MIRRORING_MODE).lower() + else: + rbd_mirroring_mode = DEFAULT_CINDER_RBD_MIRRORING_MODE + + return rbd_mirroring_mode + + +def get_glance_image(glance): + """Get the Glance image object to be used by the Ceph tests. + + It looks for the Cirros Glance image, and it's returned if it's found. + If the Cirros image is not found, it will try and find the Ubuntu + LTS image. + + :param glance: Authenticated glanceclient + :type glance: glanceclient.Client + :returns: Glance image object + :rtype: glanceclient.image + """ + images = openstack.get_images_by_name(glance, CIRROS_IMAGE_NAME) + if images: + return images[0] + logging.info("Failed to find {} image, falling back to {}".format( + CIRROS_IMAGE_NAME, + LTS_IMAGE_NAME)) + return openstack.get_images_by_name(glance, LTS_IMAGE_NAME)[0] + + +def setup_cinder_repl_volume_type(cinder, type_name='repl', + backend_name='cinder-ceph'): + """Set up the Cinder volume replication type. + + :param cinder: Authenticated cinderclient + :type cinder: cinder.Client + :param type_name: Cinder volume type name + :type type_name: str + :param backend_name: Cinder volume backend name with replication enabled. + :type backend_name: str + :returns: Cinder volume type object + :rtype: cinderclient.VolumeType + """ + try: + vol_type = cinder.volume_types.find(name=type_name) + except cinder_exceptions.NotFound: + vol_type = cinder.volume_types.create(type_name) + + vol_type.set_keys(metadata={ + 'volume_backend_name': backend_name, + 'replication_enabled': ' True', + }) + return vol_type + + +# TODO: This function should be incorporated into +# 'zaza.openstack.utilities.openstack.create_volume' helper, once the below +# flakiness comments are addressed. +def create_cinder_volume(cinder, name='zaza', image_id=None, type_id=None): + """Create a new Cinder volume. + + :param cinder: Authenticated cinderclient. + :type cinder: cinder.Client + :param name: Volume name. + :type name: str + :param image_id: Glance image id, if the volume is created from image. + :type image_id: str + :param type_id: Cinder Volume type id, if the volume needs to use an + explicit volume type. + :type type_id: boolean + :returns: Cinder volume + :rtype: :class:`Volume`. + """ + # NOTE(fnordahl): for some reason create volume from image often fails + # when run just after deployment is finished. We should figure out + # why, resolve the underlying issue and then remove this. + # + # We do not use tenacity here as it will interfere with tenacity used + # in ``resource_reaches_status`` + def create_volume(cinder, volume_params, retry=20): + if retry < 1: + return + volume = cinder.volumes.create(**volume_params) + try: + # Note(coreycb): stop_after_attempt is increased because using + # juju storage for ceph-osd backed by cinder on undercloud + # takes longer than the prior method of directory-backed OSD + # devices. + openstack.resource_reaches_status( + cinder.volumes, volume.id, msg='volume', + stop_after_attempt=20) + return volume + except AssertionError: + logging.info('retrying') + volume.delete() + return create_volume(cinder, volume_params, retry=retry - 1) + + volume_params = { + 'size': 8, + 'name': name, + } + if image_id: + volume_params['imageRef'] = image_id + if type_id: + volume_params['volume_type'] = type_id + + return create_volume(cinder, volume_params) class CephRBDMirrorBase(test_utils.OpenStackBaseTest): @@ -33,20 +160,26 @@ class CephRBDMirrorBase(test_utils.OpenStackBaseTest): def setUpClass(cls): """Run setup for ``ceph-rbd-mirror`` tests.""" super().setUpClass() + cls.cinder_ceph_app_name = 'cinder-ceph' + cls.test_cinder_volume_name = 'test-cinder-ceph-volume' # get ready for multi-model Zaza cls.site_a_model = cls.site_b_model = zaza.model.get_juju_model() cls.site_b_app_suffix = '-b' - def run_status_action(self, application_name=None, model_name=None): + def run_status_action(self, application_name=None, model_name=None, + pools=[]): """Run status action, decode and return response.""" + action_params = { + 'verbose': True, + 'format': 'json', + } + if len(pools) > 0: + action_params['pools'] = ','.join(pools) result = zaza.model.run_action_on_leader( application_name or self.application_name, 'status', model_name=model_name, - action_params={ - 'verbose': True, - 'format': 'json', - }) + action_params=action_params) return json.loads(result.results['output']) def get_pools(self): @@ -66,10 +199,26 @@ class CephRBDMirrorBase(test_utils.OpenStackBaseTest): model_name=self.site_b_model) return sorted(site_a_pools.keys()), sorted(site_b_pools.keys()) + def get_failover_pools(self): + """Get the failover Ceph pools' names, from both sites. + + If the Cinder RBD mirroring mode is 'image', the 'cinder-ceph' pool + needs to be excluded, since Cinder orchestrates the failover then. + + :returns: Tuple with site-a pools and site-b pools. + :rtype: Tuple[List[str], List[str]] + """ + site_a_pools, site_b_pools = self.get_pools() + if get_cinder_rbd_mirroring_mode(self.cinder_ceph_app_name) == 'image': + site_a_pools.remove(self.cinder_ceph_app_name) + site_b_pools.remove(self.cinder_ceph_app_name) + return site_a_pools, site_b_pools + def wait_for_mirror_state(self, state, application_name=None, model_name=None, check_entries_behind_master=False, - require_images_in=[]): + require_images_in=[], + pools=[]): """Wait until all images reach requested state. This function runs the ``status`` action and examines the data it @@ -88,6 +237,9 @@ class CephRBDMirrorBase(test_utils.OpenStackBaseTest): :type check_entries_behind_master: bool :param require_images_in: List of pools to require images in :type require_images_in: list of str + :param pools: List of pools to run status on. If this is empty, the + status action will run on all the pools. + :type pools: list of str :returns: True on success, never returns on failure """ rep = re.compile(r'.*entries_behind_master=(\d+)') @@ -95,7 +247,8 @@ class CephRBDMirrorBase(test_utils.OpenStackBaseTest): try: # encapsulate in try except to work around LP: #1820976 pool_status = self.run_status_action( - application_name=application_name, model_name=model_name) + application_name=application_name, model_name=model_name, + pools=pools) except KeyError: continue for pool, status in pool_status.items(): @@ -122,6 +275,41 @@ class CephRBDMirrorBase(test_utils.OpenStackBaseTest): # all images with state has expected state return True + def setup_test_cinder_volume(self): + """Set up the test Cinder volume into the Ceph RBD mirror environment. + + If the volume already exists, then it's returned. + + Also, if the Cinder RBD mirroring mode is 'image', the volume will + use an explicit volume type with the appropriate replication flags. + Otherwise, it is just a simple Cinder volume using the default backend. + + :returns: Cinder volume + :rtype: :class:`Volume`. + """ + session = openstack.get_overcloud_keystone_session() + cinder = openstack.get_cinder_session_client(session, version=3) + + try: + return cinder.volumes.find(name=self.test_cinder_volume_name) + except cinder_exceptions.NotFound: + logging.info("Test Cinder volume doesn't exist. Creating it") + + glance = openstack.get_glance_session_client(session) + image = get_glance_image(glance) + kwargs = { + 'cinder': cinder, + 'name': self.test_cinder_volume_name, + 'image_id': image.id, + } + if get_cinder_rbd_mirroring_mode(self.cinder_ceph_app_name) == 'image': + volume_type = setup_cinder_repl_volume_type( + cinder, + backend_name=self.cinder_ceph_app_name) + kwargs['type_id'] = volume_type.id + + return create_cinder_volume(**kwargs) + class CephRBDMirrorTest(CephRBDMirrorBase): """Encapsulate ``ceph-rbd-mirror`` tests.""" @@ -193,32 +381,7 @@ class CephRBDMirrorTest(CephRBDMirrorBase): site B and subsequently comparing the contents we get a full end to end test. """ - session = openstack.get_overcloud_keystone_session() - glance = openstack.get_glance_session_client(session) - cinder = openstack.get_cinder_session_client(session) - - image = next(glance.images.list(name=LTS_IMAGE_NAME)) - - # NOTE(fnordahl): for some reason create volume from image often fails - # when run just after deployment is finished. We should figure out - # why, resolve the underlying issue and then remove this. - # - # We do not use tenacity here as it will interfere with tenacity used - # in ``resource_reaches_status`` - def create_volume_from_image(cinder, image, retry=5): - if retry < 1: - return - volume = cinder.volumes.create(8, name='zaza', imageRef=image.id) - try: - openstack.resource_reaches_status( - cinder.volumes, volume.id, msg='volume') - return volume - except AssertionError: - logging.info('retrying') - volume.delete() - return create_volume_from_image(cinder, image, retry=retry - 1) - volume = create_volume_from_image(cinder, image) - + volume = self.setup_test_cinder_volume() site_a_hash = zaza.openstack.utilities.ceph.get_rbd_hash( zaza.model.get_lead_unit_name('ceph-mon', model_name=self.site_a_model), @@ -230,6 +393,8 @@ class CephRBDMirrorTest(CephRBDMirrorBase): check_entries_behind_master=True, application_name=self.application_name + self.site_b_app_suffix, model_name=self.site_b_model) + logging.info('Checking the Ceph RBD hashes of the primary and ' + 'the secondary Ceph images') site_b_hash = zaza.openstack.utilities.ceph.get_rbd_hash( zaza.model.get_lead_unit_name('ceph-mon' + self.site_b_app_suffix, model_name=self.site_b_model), @@ -244,102 +409,399 @@ class CephRBDMirrorTest(CephRBDMirrorBase): class CephRBDMirrorControlledFailoverTest(CephRBDMirrorBase): """Encapsulate ``ceph-rbd-mirror`` controlled failover tests.""" - def test_fail_over_fall_back(self): - """Validate controlled fail over and fall back.""" - site_a_pools, site_b_pools = self.get_pools() + def execute_failover_juju_actions(self, + primary_site_app_name, + primary_site_model, + primary_site_pools, + secondary_site_app_name, + secondary_site_model, + secondary_site_pools): + """Execute the failover Juju actions. + + The failover / failback via Juju actions shares the same workflow. The + failback is just a failover with sites in reversed order. + + This function encapsulates the tasks to failover a primary site to + a secondary site: + 1. Demote primary site + 2. Validation of the primary site demotion + 3. Promote secondary site + 4. Validation of the secondary site promotion + + :param primary_site_app_name: Primary site Ceph RBD mirror app name. + :type primary_site_app_name: str + :param primary_site_model: Primary site Juju model name. + :type primary_site_model: str + :param primary_site_pools: Primary site pools. + :type primary_site_pools: List[str] + :param secondary_site_app_name: Secondary site Ceph RBD mirror + app name. + :type secondary_site_app_name: str + :param secondary_site_model: Secondary site Juju model name. + :type secondary_site_model: str + :param secondary_site_pools: Secondary site pools. + :type secondary_site_pools: List[str] + """ + # Check if primary and secondary pools sizes are the same. + self.assertEqual(len(primary_site_pools), len(secondary_site_pools)) + + # Run the 'demote' Juju action against the primary site pools. + logging.info('Demoting {} from model {}.'.format( + primary_site_app_name, primary_site_model)) result = zaza.model.run_action_on_leader( - 'ceph-rbd-mirror', + primary_site_app_name, 'demote', - model_name=self.site_a_model, - action_params={}) + model_name=primary_site_model, + action_params={ + 'pools': ','.join(primary_site_pools) + }) logging.info(result.results) + self.assertEqual(int(result.results['Code']), 0) + + # Validate that the demoted pools count matches the total primary site + # pools count. n_pools_demoted = len(result.results['output'].split('\n')) - self.assertEqual(len(site_a_pools), n_pools_demoted) - self.wait_for_mirror_state('up+unknown', model_name=self.site_a_model) + self.assertEqual(len(primary_site_pools), n_pools_demoted) + + # At this point, both primary and secondary sites are demoted. Validate + # that the Ceph images, from both sites, report 'up+unknown', since + # there isn't a primary site at the moment. + logging.info('Waiting until {} is demoted.'.format( + primary_site_app_name)) self.wait_for_mirror_state( 'up+unknown', - application_name=self.application_name + self.site_b_app_suffix, - model_name=self.site_b_model) + application_name=primary_site_app_name, + model_name=primary_site_model, + pools=primary_site_pools) + self.wait_for_mirror_state( + 'up+unknown', + application_name=secondary_site_app_name, + model_name=secondary_site_model, + pools=secondary_site_pools) + + # Run the 'promote' Juju against the secondary site. + logging.info('Promoting {} from model {}.'.format( + secondary_site_app_name, secondary_site_model)) result = zaza.model.run_action_on_leader( - 'ceph-rbd-mirror' + self.site_b_app_suffix, + secondary_site_app_name, 'promote', - model_name=self.site_b_model, - action_params={}) + model_name=secondary_site_model, + action_params={ + 'pools': ','.join(secondary_site_pools) + }) logging.info(result.results) + self.assertEqual(int(result.results['Code']), 0) + + # Validate that the promoted pools count matches the total secondary + # site pools count. n_pools_promoted = len(result.results['output'].split('\n')) - self.assertEqual(len(site_b_pools), n_pools_promoted) + self.assertEqual(len(secondary_site_pools), n_pools_promoted) + + # Validate that the Ceph images from the newly promoted site + # report 'up+stopped' state (which is reported by primary Ceph images). + logging.info('Waiting until {} is promoted.'.format( + secondary_site_app_name)) + self.wait_for_mirror_state( + 'up+stopped', + application_name=secondary_site_app_name, + model_name=secondary_site_model, + pools=secondary_site_pools) + + # Validate that the Ceph images from site-a report 'up+replaying' + # (which is reported by secondary Ceph images). self.wait_for_mirror_state( 'up+replaying', - model_name=self.site_a_model) + check_entries_behind_master=True, + application_name=primary_site_app_name, + model_name=primary_site_model, + pools=primary_site_pools) + + def test_100_cinder_failover(self): + """Validate controlled failover via the Cinder API. + + This test only makes sense if Cinder RBD mirroring mode is 'image'. + It will return early, if this is not the case. + """ + cinder_rbd_mirroring_mode = get_cinder_rbd_mirroring_mode( + self.cinder_ceph_app_name) + if cinder_rbd_mirroring_mode != 'image': + logging.warning( + "Skipping 'test_100_cinder_failover' since Cinder RBD " + "mirroring mode is {}.".format(cinder_rbd_mirroring_mode)) + return + + session = openstack.get_overcloud_keystone_session() + cinder = openstack.get_cinder_session_client(session, version=3) + + # Check if the Cinder volume host is available with replication + # enabled. + host = 'cinder@{}'.format(self.cinder_ceph_app_name) + svc = cinder.services.list(host=host, binary='cinder-volume')[0] + self.assertEqual(svc.replication_status, 'enabled') + self.assertEqual(svc.status, 'enabled') + + # Setup the test Cinder volume + volume = self.setup_test_cinder_volume() + + # Check if the volume is properly mirrored self.wait_for_mirror_state( - 'up+stopped', + 'up+replaying', + check_entries_behind_master=True, application_name=self.application_name + self.site_b_app_suffix, - model_name=self.site_b_model) - result = zaza.model.run_action_on_leader( - 'ceph-rbd-mirror' + self.site_b_app_suffix, - 'demote', model_name=self.site_b_model, - action_params={ - }) - logging.info(result.results) - n_pools_demoted = len(result.results['output'].split('\n')) - self.assertEqual(len(site_a_pools), n_pools_demoted) - self.wait_for_mirror_state( - 'up+unknown', - model_name=self.site_a_model) - self.wait_for_mirror_state( - 'up+unknown', - application_name=self.application_name + self.site_b_app_suffix, - model_name=self.site_b_model) + pools=[self.cinder_ceph_app_name]) + + # Execute the Cinder volume failover + openstack.failover_cinder_volume_host( + cinder=cinder, + backend_name=self.cinder_ceph_app_name, + target_backend_id='ceph', + target_status='disabled', + target_replication_status='failed-over') + + # Check if the test volume is still available after failover + self.assertEqual(cinder.volumes.get(volume.id).status, 'available') + + def test_101_cinder_failback(self): + """Validate controlled failback via the Cinder API. + + This test only makes sense if Cinder RBD mirroring mode is 'image'. + It will return early, if this is not the case. + + The test needs to be executed when the Cinder volume host is already + failed-over with the test volume on it. + """ + cinder_rbd_mirroring_mode = get_cinder_rbd_mirroring_mode( + self.cinder_ceph_app_name) + if cinder_rbd_mirroring_mode != 'image': + logging.warning( + "Skipping 'test_101_cinder_failback' since Cinder RBD " + "mirroring mode is {}.".format(cinder_rbd_mirroring_mode)) + return + + session = openstack.get_overcloud_keystone_session() + cinder = openstack.get_cinder_session_client(session, version=3) + + # Check if the Cinder volume host is already failed-over + host = 'cinder@{}'.format(self.cinder_ceph_app_name) + svc = cinder.services.list(host=host, binary='cinder-volume')[0] + self.assertEqual(svc.replication_status, 'failed-over') + self.assertEqual(svc.status, 'disabled') + + # Check if the test Cinder volume is already present. The method + # 'cinder.volumes.find' raises 404 if the volume is not found. + volume = cinder.volumes.find(name=self.test_cinder_volume_name) + + # Execute the Cinder volume failback + openstack.failover_cinder_volume_host( + cinder=cinder, + backend_name=self.cinder_ceph_app_name, + target_backend_id='default', + target_status='enabled', + target_replication_status='enabled') + + # Check if the test volume is still available after failback + self.assertEqual(cinder.volumes.get(volume.id).status, 'available') + + def test_200_juju_failover(self): + """Validate controlled failover via Juju actions.""" + # Get the Ceph pools needed to failover + site_a_pools, site_b_pools = self.get_failover_pools() + + # Execute the failover Juju actions with the appropriate parameters. + site_b_app_name = self.application_name + self.site_b_app_suffix + self.execute_failover_juju_actions( + primary_site_app_name=self.application_name, + primary_site_model=self.site_a_model, + primary_site_pools=site_a_pools, + secondary_site_app_name=site_b_app_name, + secondary_site_model=self.site_b_model, + secondary_site_pools=site_b_pools) + + def test_201_juju_failback(self): + """Validate controlled failback via Juju actions.""" + # Get the Ceph pools needed to failback + site_a_pools, site_b_pools = self.get_failover_pools() + + # Execute the failover Juju actions with the appropriate parameters. + # The failback operation is just a failover with sites in reverse + # order. + site_b_app_name = self.application_name + self.site_b_app_suffix + self.execute_failover_juju_actions( + primary_site_app_name=site_b_app_name, + primary_site_model=self.site_b_model, + primary_site_pools=site_b_pools, + secondary_site_app_name=self.application_name, + secondary_site_model=self.site_a_model, + secondary_site_pools=site_a_pools) + + def test_203_juju_resync(self): + """Validate the 'resync-pools' Juju action. + + The 'resync-pools' Juju action is meant to flag Ceph images from the + secondary site to re-sync against the Ceph images from the primary + site. + + This use case is useful when the Ceph secondary images are out of sync. + """ + # Get the Ceph pools needed to failback + _, site_b_pools = self.get_failover_pools() + + # Run the 'resync-pools' Juju action against the pools from site-b. + # This will make sure that the Ceph images from site-b are properly + # synced with the primary images from site-a. + site_b_app_name = self.application_name + self.site_b_app_suffix + logging.info('Re-syncing {} from model {}'.format( + site_b_app_name, self.site_b_model)) result = zaza.model.run_action_on_leader( - 'ceph-rbd-mirror', - 'promote', - model_name=self.site_a_model, - action_params={ - }) - logging.info(result.results) - n_pools_promoted = len(result.results['output'].split('\n')) - self.assertEqual(len(site_b_pools), n_pools_promoted) - self.wait_for_mirror_state( - 'up+stopped', - model_name=self.site_a_model) - result = zaza.model.run_action_on_leader( - 'ceph-rbd-mirror' + self.site_b_app_suffix, + site_b_app_name, 'resync-pools', model_name=self.site_b_model, action_params={ + 'pools': ','.join(site_b_pools), 'i-really-mean-it': True, }) logging.info(result.results) + self.assertEqual(int(result.results['Code']), 0) + + # Validate that the Ceph images from site-b report 'up+replaying' + # (which is reported by secondary Ceph images). And check that images + # exist in Cinder and Glance pools. self.wait_for_mirror_state( 'up+replaying', - application_name=self.application_name + self.site_b_app_suffix, + check_entries_behind_master=True, + application_name=site_b_app_name, model_name=self.site_b_model, - require_images_in=['cinder-ceph', 'glance']) + require_images_in=[self.cinder_ceph_app_name, 'glance'], + pools=site_b_pools) class CephRBDMirrorDisasterFailoverTest(CephRBDMirrorBase): """Encapsulate ``ceph-rbd-mirror`` destructive tests.""" - def test_kill_site_a_fail_over(self): - """Validate fail over after uncontrolled shutdown of primary.""" - for application in 'ceph-rbd-mirror', 'ceph-mon', 'ceph-osd': + def apply_cinder_ceph_workaround(self): + """Set minimal timeouts / retries to the Cinder Ceph backend. + + This is needed because the failover via Cinder API will try to do a + demotion of the site-a. However, when site-a is down, and with the + default timeouts / retries, the operation takes an unreasonably amount + of time (or sometimes it never finishes). + """ + # These new config options need to be set under the Cinder Ceph backend + # section in the main Cinder config file. + # At the moment, we don't the possibility of using Juju config to set + # these options. And also, it's not even a good practice to have them + # in production. + # These should be set only to do the Ceph failover via Cinder API, and + # they need to be removed after. + configs = { + 'rados_connect_timeout': '1', + 'rados_connection_retries': '1', + 'rados_connection_interval': '0', + 'replication_connect_timeout': '1', + } + + # Small Python script that will be executed via Juju run to update + # the Cinder config file. + update_cinder_conf_script = ( + "import configparser; " + "config = configparser.ConfigParser(); " + "config.read('/etc/cinder/cinder.conf'); " + "{}" + "f = open('/etc/cinder/cinder.conf', 'w'); " + "config.write(f); " + "f.close()") + set_cmd = '' + for cfg_name in configs: + set_cmd += "config.set('{0}', '{1}', '{2}'); ".format( + self.cinder_ceph_app_name, cfg_name, configs[cfg_name]) + script = update_cinder_conf_script.format(set_cmd) + + # Run the workaround script via Juju run + zaza.model.run_on_leader( + self.cinder_ceph_app_name, + 'python3 -c "{}"; systemctl restart cinder-volume'.format(script)) + + def kill_primary_site(self): + """Simulate an unexpected primary site shutdown.""" + logging.info('Killing the Ceph primary site') + for application in ['ceph-rbd-mirror', 'ceph-mon', 'ceph-osd']: zaza.model.remove_application( application, model_name=self.site_a_model, forcefully_remove_machines=True) + + def test_100_forced_juju_failover(self): + """Validate Ceph failover via Juju when the primary site is down. + + * Kill the primary site + * Execute the forced failover via Juju actions + """ + # Get the site-b Ceph pools that need to be promoted + _, site_b_pools = self.get_failover_pools() + site_b_app_name = self.application_name + self.site_b_app_suffix + + # Simulate primary site unexpected shutdown + self.kill_primary_site() + + # Try and promote the site-b to primary. result = zaza.model.run_action_on_leader( - 'ceph-rbd-mirror' + self.site_b_app_suffix, + site_b_app_name, 'promote', model_name=self.site_b_model, action_params={ + 'pools': ','.join(site_b_pools), }) + self.assertEqual(int(result.results['Code']), 0) + + # The site-b 'promote' Juju action is expected to fail, because the + # primary site is down. self.assertEqual(result.status, 'failed') + + # Retry to promote site-b using the 'force' Juju action parameter. result = zaza.model.run_action_on_leader( - 'ceph-rbd-mirror' + self.site_b_app_suffix, + site_b_app_name, 'promote', model_name=self.site_b_model, action_params={ 'force': True, + 'pools': ','.join(site_b_pools), }) + self.assertEqual(int(result.results['Code']), 0) + + # Validate successful Juju action execution self.assertEqual(result.status, 'completed') + + def test_200_forced_cinder_failover(self): + """Validate Ceph failover via Cinder when the primary site is down. + + This test only makes sense if Cinder RBD mirroring mode is 'image'. + It will return early, if this is not the case. + + This assumes that the primary site is already killed. + """ + cinder_rbd_mirroring_mode = get_cinder_rbd_mirroring_mode( + self.cinder_ceph_app_name) + if cinder_rbd_mirroring_mode != 'image': + logging.warning( + "Skipping 'test_200_cinder_failover_without_primary_site' " + "since Cinder RBD mirroring mode is {}.".format( + cinder_rbd_mirroring_mode)) + return + + # Make sure that the Cinder Ceph backend workaround is applied. + self.apply_cinder_ceph_workaround() + + session = openstack.get_overcloud_keystone_session() + cinder = openstack.get_cinder_session_client(session, version=3) + openstack.failover_cinder_volume_host( + cinder=cinder, + backend_name=self.cinder_ceph_app_name, + target_backend_id='ceph', + target_status='disabled', + target_replication_status='failed-over') + + # Check that the Cinder volumes are still available after forced + # failover. + for volume in cinder.volumes.list(): + self.assertEqual(volume.status, 'available') diff --git a/zaza/openstack/charm_tests/ceph/setup.py b/zaza/openstack/charm_tests/ceph/setup.py index c53ff3c..87f213d 100644 --- a/zaza/openstack/charm_tests/ceph/setup.py +++ b/zaza/openstack/charm_tests/ceph/setup.py @@ -14,7 +14,23 @@ """Setup for ceph-osd deployments.""" +import logging +import zaza.model + def basic_setup(): """Run basic setup for ceph-osd.""" pass + + +def ceph_ready(): + """Wait for ceph to be ready. + + Wait for ceph to be ready. This is useful if the target_deploy_status in + the tests.yaml is expecting ceph to be in a blocked state. After ceph + has been unblocked the deploy may need to wait for ceph to be ready. + """ + logging.info("Waiting for ceph units to settle") + zaza.model.wait_for_application_states() + zaza.model.block_until_all_units_idle() + logging.info("Ceph units settled") diff --git a/zaza/openstack/charm_tests/ceph/tests.py b/zaza/openstack/charm_tests/ceph/tests.py index f8bd0c1..27fefbd 100644 --- a/zaza/openstack/charm_tests/ceph/tests.py +++ b/zaza/openstack/charm_tests/ceph/tests.py @@ -15,11 +15,13 @@ """Ceph Testing.""" import unittest +import json import logging from os import ( listdir, path ) +import requests import tempfile import tenacity @@ -31,7 +33,7 @@ import zaza.model as zaza_model import zaza.openstack.utilities.ceph as zaza_ceph import zaza.openstack.utilities.exceptions as zaza_exceptions import zaza.openstack.utilities.generic as zaza_utils -import zaza.openstack.utilities.juju as zaza_juju +import zaza.utilities.juju as juju_utils import zaza.openstack.utilities.openstack as zaza_openstack @@ -56,7 +58,7 @@ class CephLowLevelTest(test_utils.OpenStackBaseTest): } ceph_osd_processes = { - 'ceph-osd': [2, 3] + 'ceph-osd': [1, 2, 3] } # Units with process names and PID quantities expected @@ -95,6 +97,16 @@ class CephLowLevelTest(test_utils.OpenStackBaseTest): target_status='running' ) + @test_utils.skipUntilVersion('ceph-mon', 'ceph', '14.2.0') + def test_pg_tuning(self): + """Verify that auto PG tuning is enabled for Nautilus+.""" + unit_name = 'ceph-mon/0' + cmd = "ceph osd pool autoscale-status --format=json" + result = zaza_model.run_on_unit(unit_name, cmd) + self.assertEqual(result['Code'], '0') + for pool in json.loads(result['Stdout']): + self.assertEqual(pool['pg_autoscale_mode'], 'on') + class CephRelationTest(test_utils.OpenStackBaseTest): """Ceph's relations test class.""" @@ -112,7 +124,7 @@ class CephRelationTest(test_utils.OpenStackBaseTest): relation_name = 'osd' remote_unit = zaza_model.get_unit_from_name(remote_unit_name) remote_ip = remote_unit.public_address - relation = zaza_juju.get_relation_from_unit( + relation = juju_utils.get_relation_from_unit( unit_name, remote_unit_name, relation_name @@ -138,11 +150,10 @@ class CephRelationTest(test_utils.OpenStackBaseTest): fsid = result.get('Stdout').strip() expected = { 'private-address': remote_ip, - 'auth': 'none', 'ceph-public-address': remote_ip, 'fsid': fsid, } - relation = zaza_juju.get_relation_from_unit( + relation = juju_utils.get_relation_from_unit( unit_name, remote_unit_name, relation_name @@ -360,6 +371,19 @@ class CephTest(test_utils.OpenStackBaseTest): As the ephemeral device will have data on it we can use it to validate that these checks work as intended. """ + current_release = zaza_openstack.get_os_release() + focal_ussuri = zaza_openstack.get_os_release('focal_ussuri') + if current_release >= focal_ussuri: + # NOTE(ajkavanagh) - focal (on ServerStack) is broken for /dev/vdb + # and so this test can't pass: LP#1842751 discusses the issue, but + # basically the snapd daemon along with lxcfs results in /dev/vdb + # being mounted in the lxcfs process namespace. If the charm + # 'tries' to umount it, it can (as root), but the mount is still + # 'held' by lxcfs and thus nothing else can be done with it. This + # is only a problem in serverstack with images with a default + # /dev/vdb ephemeral + logging.warn("Skipping pristine disk test for focal and higher") + return logging.info('Checking behaviour when non-pristine disks appear...') logging.info('Configuring ephemeral-unmount...') alternate_conf = { @@ -408,9 +432,14 @@ class CephTest(test_utils.OpenStackBaseTest): set_default = { 'ephemeral-unmount': '', - 'osd-devices': '/dev/vdb /srv/ceph', + 'osd-devices': '/dev/vdb', } + current_release = zaza_openstack.get_os_release() + bionic_train = zaza_openstack.get_os_release('bionic_train') + if current_release < bionic_train: + set_default['osd-devices'] = '/dev/vdb /srv/ceph' + logging.info('Restoring to default configuration...') zaza_model.set_application_config(juju_service, set_default) @@ -515,7 +544,7 @@ class CephRGWTest(test_utils.OpenStackBaseTest): @classmethod def setUpClass(cls): """Run class setup for running ceph low level tests.""" - super(CephRGWTest, cls).setUpClass() + super(CephRGWTest, cls).setUpClass(application_name='ceph-radosgw') @property def expected_apps(self): @@ -577,6 +606,12 @@ class CephRGWTest(test_utils.OpenStackBaseTest): target_status='running' ) + # When testing with TLS there is a chance the deployment will appear done + # and idle prior to ceph-radosgw and Keystone have updated the service + # catalog. Retry the test in this circumstance. + @tenacity.retry(wait=tenacity.wait_exponential(multiplier=10, max=300), + reraise=True, stop=tenacity.stop_after_attempt(10), + retry=tenacity.retry_if_exception_type(IOError)) def test_object_storage(self): """Verify object storage API. @@ -587,10 +622,13 @@ class CephRGWTest(test_utils.OpenStackBaseTest): 'multisite configuration') logging.info('Checking Swift REST API') keystone_session = zaza_openstack.get_overcloud_keystone_session() - region_name = 'RegionOne' + region_name = zaza_model.get_application_config( + self.application_name, + model_name=self.model_name)['region']['value'] swift_client = zaza_openstack.get_swift_session_client( keystone_session, - region_name + region_name, + cacert=self.cacert, ) _container = 'demo-container' _test_data = 'Test data from Zaza' @@ -614,7 +652,8 @@ class CephRGWTest(test_utils.OpenStackBaseTest): keystone_session = zaza_openstack.get_overcloud_keystone_session() source_client = zaza_openstack.get_swift_session_client( keystone_session, - region_name='east-1' + region_name='east-1', + cacert=self.cacert, ) _container = 'demo-container' _test_data = 'Test data from Zaza' @@ -628,7 +667,8 @@ class CephRGWTest(test_utils.OpenStackBaseTest): target_client = zaza_openstack.get_swift_session_client( keystone_session, - region_name='east-1' + region_name='east-1', + cacert=self.cacert, ) @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60), @@ -660,11 +700,13 @@ class CephRGWTest(test_utils.OpenStackBaseTest): keystone_session = zaza_openstack.get_overcloud_keystone_session() source_client = zaza_openstack.get_swift_session_client( keystone_session, - region_name='east-1' + region_name='east-1', + cacert=self.cacert, ) target_client = zaza_openstack.get_swift_session_client( keystone_session, - region_name='west-1' + region_name='west-1', + cacert=self.cacert, ) zaza_model.run_action_on_leader( 'slave-ceph-radosgw', @@ -706,7 +748,269 @@ class CephProxyTest(unittest.TestCase): def test_ceph_health(self): """Make sure ceph-proxy can communicate with ceph.""" + logging.info('Wait for idle/ready status...') + zaza_model.wait_for_application_states() + self.assertEqual( zaza_model.run_on_leader("ceph-proxy", "sudo ceph health")["Code"], "0" ) + + def test_cinder_ceph_restrict_pool_setup(self): + """Make sure cinder-ceph restrict pool was created successfully.""" + logging.info('Wait for idle/ready status...') + zaza_model.wait_for_application_states() + + pools = zaza_ceph.get_ceph_pools('ceph-mon/0') + if 'cinder-ceph' not in pools: + msg = 'cinder-ceph pool was not found upon querying ceph-mon/0' + raise zaza_exceptions.CephPoolNotFound(msg) + + # Checking for cinder-ceph specific permissions makes + # the test more rugged when we add additional relations + # to ceph for other applications (such as glance and nova). + expected_permissions = [ + "allow rwx pool=cinder-ceph", + "allow class-read object_prefix rbd_children", + ] + cmd = "sudo ceph auth get client.cinder-ceph" + result = zaza_model.run_on_unit('ceph-mon/0', cmd) + output = result.get('Stdout').strip() + + for expected in expected_permissions: + if expected not in output: + msg = ('cinder-ceph pool restriction ({}) was not' + ' configured correctly.' + ' Found: {}'.format(expected, output)) + raise zaza_exceptions.CephPoolNotConfigured(msg) + + +class CephPrometheusTest(unittest.TestCase): + """Test the Ceph <-> Prometheus relation.""" + + def test_prometheus_metrics(self): + """Validate that Prometheus has Ceph metrics.""" + try: + zaza_model.get_application( + 'prometheus2') + except KeyError: + raise unittest.SkipTest('Prometheus not present, skipping test') + unit = zaza_model.get_unit_from_name( + zaza_model.get_lead_unit_name('prometheus2')) + self.assertEqual( + '3', _get_mon_count_from_prometheus(unit.public_address)) + + +class CephPoolConfig(Exception): + """Custom Exception for bad Ceph pool config.""" + + pass + + +class CheckPoolTypes(unittest.TestCase): + """Test the ceph pools created for clients are of the expected type.""" + + def test_check_pool_types(self): + """Check type of pools created for clients.""" + app_pools = [ + ('glance', 'glance'), + ('nova-compute', 'nova'), + ('cinder-ceph', 'cinder-ceph')] + runtime_pool_details = zaza_ceph.get_ceph_pool_details() + for app, pool_name in app_pools: + try: + app_config = zaza_model.get_application_config(app) + except KeyError: + logging.info( + 'Skipping pool check of %s, application %s not present', + pool_name, + app) + continue + rel_id = zaza_model.get_relation_id( + app, + 'ceph-mon', + remote_interface_name='client') + if not rel_id: + logging.info( + 'Skipping pool check of %s, ceph relation not present', + app) + continue + juju_pool_config = app_config.get('pool-type') + if juju_pool_config: + expected_pool_type = juju_pool_config['value'] + else: + # If the pool-type option is absent assume the default of + # replicated. + expected_pool_type = zaza_ceph.REPLICATED_POOL_TYPE + for pool_config in runtime_pool_details: + if pool_config['pool_name'] == pool_name: + logging.info('Checking {} is {}'.format( + pool_name, + expected_pool_type)) + expected_pool_code = -1 + if expected_pool_type == zaza_ceph.REPLICATED_POOL_TYPE: + expected_pool_code = zaza_ceph.REPLICATED_POOL_CODE + elif expected_pool_type == zaza_ceph.ERASURE_POOL_TYPE: + expected_pool_code = zaza_ceph.ERASURE_POOL_CODE + self.assertEqual( + pool_config['type'], + expected_pool_code) + break + else: + raise CephPoolConfig( + "Failed to find config for {}".format(pool_name)) + + +# NOTE: We might query before prometheus has fetch data +@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, + min=5, max=10), + reraise=True) +def _get_mon_count_from_prometheus(prometheus_ip): + url = ('http://{}:9090/api/v1/query?query=' + 'count(ceph_mon_metadata)'.format(prometheus_ip)) + client = requests.session() + response = client.get(url) + logging.debug("Prometheus response: {}".format(response.json())) + return response.json()['data']['result'][0]['value'][1] + + +class BlueStoreCompressionCharmOperation(test_utils.BaseCharmTest): + """Test charm handling of bluestore compression configuration options.""" + + @classmethod + def setUpClass(cls): + """Perform class one time initialization.""" + super(BlueStoreCompressionCharmOperation, cls).setUpClass() + release_application = 'keystone' + try: + zaza_model.get_application(release_application) + except KeyError: + release_application = 'ceph-mon' + cls.current_release = zaza_openstack.get_os_release( + application=release_application) + cls.bionic_rocky = zaza_openstack.get_os_release('bionic_rocky') + + def setUp(self): + """Perform common per test initialization steps.""" + super(BlueStoreCompressionCharmOperation, self).setUp() + + # determine if the tests should be run or not + logging.debug('os_release: {} >= {} = {}' + .format(self.current_release, + self.bionic_rocky, + self.current_release >= self.bionic_rocky)) + self.mimic_or_newer = self.current_release >= self.bionic_rocky + + def _assert_pools_properties(self, pools, pools_detail, + expected_properties, log_func=logging.info): + """Check properties on a set of pools. + + :param pools: List of pool names to check. + :type pools: List[str] + :param pools_detail: List of dictionaries with pool detail + :type pools_detail List[Dict[str,any]] + :param expected_properties: Properties to check and their expected + values. + :type expected_properties: Dict[str,any] + :returns: Nothing + :raises: AssertionError + """ + for pool in pools: + for pd in pools_detail: + if pd['pool_name'] == pool: + if 'options' in expected_properties: + for k, v in expected_properties['options'].items(): + self.assertEquals(pd['options'][k], v) + log_func("['options']['{}'] == {}".format(k, v)) + for k, v in expected_properties.items(): + if k == 'options': + continue + self.assertEquals(pd[k], v) + log_func("{} == {}".format(k, v)) + + def test_configure_compression(self): + """Enable compression and validate properties flush through to pool.""" + if not self.mimic_or_newer: + logging.info('Skipping test, Mimic or newer required.') + return + if self.application_name == 'ceph-osd': + # The ceph-osd charm itself does not request pools, neither does + # the BlueStore Compression configuration options it have affect + # pool properties. + logging.info('test does not apply to ceph-osd charm.') + return + elif self.application_name == 'ceph-radosgw': + # The Ceph RadosGW creates many light weight pools to keep track of + # metadata, we only compress the pool containing actual data. + app_pools = ['.rgw.buckets.data'] + else: + # Retrieve which pools the charm under test has requested skipping + # metadata pools as they are deliberately not compressed. + app_pools = [ + pool + for pool in zaza_ceph.get_pools_from_broker_req( + self.application_name, model_name=self.model_name) + if 'metadata' not in pool + ] + + ceph_pools_detail = zaza_ceph.get_ceph_pool_details( + model_name=self.model_name) + + logging.debug('BEFORE: {}'.format(ceph_pools_detail)) + try: + logging.info('Checking Ceph pool compression_mode prior to change') + self._assert_pools_properties( + app_pools, ceph_pools_detail, + {'options': {'compression_mode': 'none'}}) + except KeyError: + logging.info('property does not exist on pool, which is OK.') + logging.info('Changing "bluestore-compression-mode" to "force" on {}' + .format(self.application_name)) + with self.config_change( + {'bluestore-compression-mode': 'none'}, + {'bluestore-compression-mode': 'force'}): + # Retrieve pool details from Ceph after changing configuration + ceph_pools_detail = zaza_ceph.get_ceph_pool_details( + model_name=self.model_name) + logging.debug('CONFIG_CHANGE: {}'.format(ceph_pools_detail)) + logging.info('Checking Ceph pool compression_mode after to change') + self._assert_pools_properties( + app_pools, ceph_pools_detail, + {'options': {'compression_mode': 'force'}}) + ceph_pools_detail = zaza_ceph.get_ceph_pool_details( + model_name=self.model_name) + logging.debug('AFTER: {}'.format(ceph_pools_detail)) + logging.debug(juju_utils.get_relation_from_unit( + 'ceph-mon', self.application_name, None, + model_name=self.model_name)) + logging.info('Checking Ceph pool compression_mode after restoring ' + 'config to previous value') + self._assert_pools_properties( + app_pools, ceph_pools_detail, + {'options': {'compression_mode': 'none'}}) + + def test_invalid_compression_configuration(self): + """Set invalid configuration and validate charm response.""" + if not self.mimic_or_newer: + logging.info('Skipping test, Mimic or newer required.') + return + stored_target_deploy_status = self.test_config.get( + 'target_deploy_status', {}) + new_target_deploy_status = stored_target_deploy_status.copy() + new_target_deploy_status[self.application_name] = { + 'workload-status': 'blocked', + 'workload-status-message': 'Invalid configuration', + } + if 'target_deploy_status' in self.test_config: + self.test_config['target_deploy_status'].update( + new_target_deploy_status) + else: + self.test_config['target_deploy_status'] = new_target_deploy_status + + with self.config_change( + {'bluestore-compression-mode': 'none'}, + {'bluestore-compression-mode': 'PEBCAK'}): + logging.info('Charm went into blocked state as expected, restore ' + 'configuration') + self.test_config[ + 'target_deploy_status'] = stored_target_deploy_status diff --git a/zaza/openstack/charm_tests/charm_upgrade/__init__.py b/zaza/openstack/charm_tests/charm_upgrade/__init__.py new file mode 100644 index 0000000..fc5baa5 --- /dev/null +++ b/zaza/openstack/charm_tests/charm_upgrade/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test charm upgrade.""" diff --git a/zaza/openstack/charm_tests/charm_upgrade/tests.py b/zaza/openstack/charm_tests/charm_upgrade/tests.py new file mode 100644 index 0000000..f55caf0 --- /dev/null +++ b/zaza/openstack/charm_tests/charm_upgrade/tests.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Define class for Charm Upgrade.""" + +import logging +import unittest + +import zaza.model +from zaza.openstack.utilities import ( + cli as cli_utils, + upgrade_utils as upgrade_utils, +) +from zaza.openstack.charm_tests.nova.tests import LTSGuestCreateTest + + +class FullCloudCharmUpgradeTest(unittest.TestCase): + """Class to encapsulate Charm Upgrade Tests.""" + + @classmethod + def setUpClass(cls): + """Run setup for Charm Upgrades.""" + cli_utils.setup_logging() + cls.lts = LTSGuestCreateTest() + cls.lts.setUpClass() + cls.target_charm_namespace = '~openstack-charmers-next' + + def get_upgrade_url(self, charm_url): + """Return the charm_url to upgrade to. + + :param charm_url: Current charm url. + :type charm_url: str + """ + charm_name = upgrade_utils.extract_charm_name_from_url( + charm_url) + next_charm_url = zaza.model.get_latest_charm_url( + "cs:{}/{}".format(self.target_charm_namespace, charm_name)) + return next_charm_url + + def test_200_run_charm_upgrade(self): + """Run charm upgrade.""" + self.lts.test_launch_small_instance() + applications = zaza.model.get_status().applications + groups = upgrade_utils.get_charm_upgrade_groups( + extra_filters=[upgrade_utils._filter_etcd, + upgrade_utils._filter_easyrsa, + upgrade_utils._filter_memcached]) + for group_name, group in groups: + logging.info("About to upgrade {} ({})".format(group_name, group)) + for application, app_details in applications.items(): + if application not in group: + continue + target_url = self.get_upgrade_url(app_details['charm']) + if target_url == app_details['charm']: + logging.warn( + "Skipping upgrade of {}, already using {}".format( + application, + target_url)) + else: + logging.info("Upgrading {} to {}".format( + application, + target_url)) + zaza.model.upgrade_charm( + application, + switch=target_url) + logging.info("Waiting for charm url to update") + zaza.model.block_until_charm_url(application, target_url) + zaza.model.block_until_all_units_idle() + self.lts.test_launch_small_instance() diff --git a/zaza/openstack/charm_tests/cinder/tests.py b/zaza/openstack/charm_tests/cinder/tests.py index 1809cb9..552d8a2 100644 --- a/zaza/openstack/charm_tests/cinder/tests.py +++ b/zaza/openstack/charm_tests/cinder/tests.py @@ -23,6 +23,12 @@ import zaza.openstack.charm_tests.test_utils as test_utils import zaza.openstack.utilities.openstack as openstack_utils import zaza.openstack.charm_tests.glance.setup as glance_setup +from tenacity import ( + Retrying, + stop_after_attempt, + wait_exponential, +) + class CinderTests(test_utils.OpenStackBaseTest): """Encapsulate Cinder tests.""" @@ -32,7 +38,10 @@ class CinderTests(test_utils.OpenStackBaseTest): @classmethod def setUpClass(cls): """Run class setup for running tests.""" - super(CinderTests, cls).setUpClass() + super(CinderTests, cls).setUpClass(application_name='cinder') + cls.application_name = 'cinder' + cls.lead_unit = zaza.model.get_lead_unit_name( + "cinder", model_name=cls.model_name) cls.cinder_client = openstack_utils.get_cinder_session_client( cls.keystone_session) cls.nova_client = openstack_utils.get_nova_session_client( @@ -42,18 +51,66 @@ class CinderTests(test_utils.OpenStackBaseTest): def tearDown(cls): """Remove test resources.""" logging.info('Running teardown') - for snapshot in cls.cinder_client.volume_snapshots.list(): + for attempt in Retrying( + stop=stop_after_attempt(8), + wait=wait_exponential(multiplier=1, min=2, max=60)): + with attempt: + volumes = list(cls.cinder_client.volumes.list()) + snapped_volumes = [v for v in volumes + if v.name.endswith("-from-snap")] + if snapped_volumes: + logging.info("Removing volumes from snapshot") + cls._remove_volumes(snapped_volumes) + volumes = list(cls.cinder_client.volumes.list()) + + snapshots = list(cls.cinder_client.volume_snapshots.list()) + if snapshots: + logging.info("tearDown - snapshots: {}".format( + ", ".join(s.name for s in snapshots))) + cls._remove_snapshots(snapshots) + + if volumes: + logging.info("tearDown - volumes: {}".format( + ", ".join(v.name for v in volumes))) + cls._remove_volumes(volumes) + + @classmethod + def _remove_snapshots(cls, snapshots): + """Remove snapshots passed as param. + + :param volumes: the snapshots to delete + :type volumes: List[snapshot objects] + """ + for snapshot in snapshots: if snapshot.name.startswith(cls.RESOURCE_PREFIX): - openstack_utils.delete_resource( - cls.cinder_client.volume_snapshots, - snapshot.id, - msg="snapshot") - for volume in cls.cinder_client.volumes.list(): + logging.info("removing snapshot: {}".format(snapshot.name)) + try: + openstack_utils.delete_resource( + cls.cinder_client.volume_snapshots, + snapshot.id, + msg="snapshot") + except Exception as e: + logging.error("error removing snapshot: {}".format(str(e))) + raise + + @classmethod + def _remove_volumes(cls, volumes): + """Remove volumes passed as param. + + :param volumes: the volumes to delete + :type volumes: List[volume objects] + """ + for volume in volumes: if volume.name.startswith(cls.RESOURCE_PREFIX): - openstack_utils.delete_resource( - cls.cinder_client.volumes, - volume.id, - msg="volume") + logging.info("removing volume: {}".format(volume.name)) + try: + openstack_utils.delete_resource( + cls.cinder_client.volumes, + volume.id, + msg="volume") + except Exception as e: + logging.error("error removing volume: {}".format(str(e))) + raise def test_100_volume_create_extend_delete(self): """Test creating, extending a volume.""" @@ -63,6 +120,8 @@ class CinderTests(test_utils.OpenStackBaseTest): openstack_utils.resource_reaches_status( self.cinder_client.volumes, vol_new.id, + wait_iteration_max_time=1200, + stop_after_attempt=20, expected_status="available", msg="Volume status wait") self.cinder_client.volumes.extend( @@ -71,20 +130,30 @@ class CinderTests(test_utils.OpenStackBaseTest): openstack_utils.resource_reaches_status( self.cinder_client.volumes, vol_new.id, + wait_iteration_max_time=1200, + stop_after_attempt=20, expected_status="available", msg="Volume status wait") def test_105_volume_create_from_img(self): """Test creating a volume from an image.""" + logging.debug("finding image {} ..." + .format(glance_setup.LTS_IMAGE_NAME)) image = self.nova_client.glance.find_image( glance_setup.LTS_IMAGE_NAME) + logging.debug("using cinder_client to create volume from image {}" + .format(image.id)) vol_img = self.cinder_client.volumes.create( name='{}-105-vol-from-img'.format(self.RESOURCE_PREFIX), size=3, imageRef=image.id) + logging.debug("now waiting for volume {} to reach available" + .format(vol_img.id)) openstack_utils.resource_reaches_status( self.cinder_client.volumes, vol_img.id, + wait_iteration_max_time=1200, + stop_after_attempt=20, expected_status="available", msg="Volume status wait") @@ -97,6 +166,8 @@ class CinderTests(test_utils.OpenStackBaseTest): openstack_utils.resource_reaches_status( self.cinder_client.volumes, vol_new.id, + wait_iteration_max_time=1200, + stop_after_attempt=20, expected_status="available", msg="Volume status wait") @@ -107,6 +178,8 @@ class CinderTests(test_utils.OpenStackBaseTest): openstack_utils.resource_reaches_status( self.cinder_client.volume_snapshots, snap_new.id, + wait_iteration_max_time=1200, + stop_after_attempt=20, expected_status="available", msg="Volume status wait") @@ -118,6 +191,8 @@ class CinderTests(test_utils.OpenStackBaseTest): openstack_utils.resource_reaches_status( self.cinder_client.volumes, vol_from_snap.id, + wait_iteration_max_time=1200, + stop_after_attempt=20, expected_status="available", msg="Volume status wait") @@ -129,6 +204,8 @@ class CinderTests(test_utils.OpenStackBaseTest): openstack_utils.resource_reaches_status( self.cinder_client.volumes, vol_new.id, + wait_iteration_max_time=1200, + stop_after_attempt=20, expected_status="available", msg="Volume status wait") vol_new.force_delete() @@ -139,36 +216,38 @@ class CinderTests(test_utils.OpenStackBaseTest): @property def services(self): - """Return a list services for OpenStack release.""" - services = ['cinder-scheduler', 'cinder-volume'] - if (openstack_utils.get_os_release() >= - openstack_utils.get_os_release('xenial_ocata')): - services.append('apache2') + """Return a list services for the selected OpenStack release.""" + current_value = zaza.model.get_application_config( + self.application_name)['enabled-services']['value'] + + if current_value == "all": + services = ['cinder-scheduler', 'cinder-volume', 'cinder-api'] else: - services.append('cinder-api') + services = ['cinder-{}'.format(svc) + for svc in ('api', 'scheduler', 'volume') + if svc in current_value] + + if ('cinder-api' in services and + (openstack_utils.get_os_release() >= + openstack_utils.get_os_release('xenial_ocata'))): + services.remove('cinder-api') + services.append('apache2') + return services def test_900_restart_on_config_change(self): """Checking restart happens on config change. - Change disk format and assert then change propagates to the correct + Change debug mode and assert that change propagates to the correct file and that services are restarted as a result """ - # Expected default and alternate values - set_default = {'debug': 'False'} - set_alternate = {'debug': 'True'} - # Config file affected by juju set config change conf_file = '/etc/cinder/cinder.conf' # Make config change, check for service restarts - logging.debug('Setting disk format glance...') - self.restart_on_changed( + logging.debug('Setting debug mode...') + self.restart_on_changed_debug_oslo_config_file( conf_file, - set_default, - set_alternate, - {'DEFAULT': {'debug': ['False']}}, - {'DEFAULT': {'debug': ['True']}}, self.services) def test_901_pause_resume(self): @@ -177,13 +256,7 @@ class CinderTests(test_utils.OpenStackBaseTest): Pause service and check services are stopped then resume and check they are started """ - services = ['cinder-scheduler', 'cinder-volume'] - if (openstack_utils.get_os_release() >= - openstack_utils.get_os_release('xenial_ocata')): - services.append('apache2') - else: - services.append('cinder-api') - with self.pause_resume(services): + with self.pause_resume(self.services): logging.info("Testing pause resume") diff --git a/zaza/openstack/charm_tests/cinder_backup/__init__.py b/zaza/openstack/charm_tests/cinder_backup/__init__.py new file mode 100644 index 0000000..6501e55 --- /dev/null +++ b/zaza/openstack/charm_tests/cinder_backup/__init__.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing cinder-backup.""" diff --git a/zaza/openstack/charm_tests/cinder_backup/tests.py b/zaza/openstack/charm_tests/cinder_backup/tests.py new file mode 100644 index 0000000..97b3658 --- /dev/null +++ b/zaza/openstack/charm_tests/cinder_backup/tests.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python3 +# +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate cinder-backup testing.""" +import copy +import logging + +import tenacity + +import zaza.model +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.ceph as ceph_utils +import zaza.openstack.utilities.openstack as openstack_utils + + +class CinderBackupTest(test_utils.OpenStackBaseTest): + """Encapsulate Cinder Backup tests.""" + + RESOURCE_PREFIX = 'zaza-cinderbackuptests' + + @classmethod + def setUpClass(cls): + """Run class setup for running Cinder Backup tests.""" + super(CinderBackupTest, cls).setUpClass() + cls.cinder_client = openstack_utils.get_cinder_session_client( + cls.keystone_session) + + @property + def services(self): + """Return a list services for the selected OpenStack release.""" + current_release = openstack_utils.get_os_release() + services = ['cinder-scheduler', 'cinder-volume'] + if (current_release >= + openstack_utils.get_os_release('xenial_ocata')): + services.append('apache2') + else: + services.append('cinder-api') + return services + + def test_100_volume_create_extend_delete(self): + """Test creating, extending a volume.""" + vol_new = openstack_utils.create_volume( + self.cinder_client, + name='{}-100-vol'.format(self.RESOURCE_PREFIX), + size=1) + self.cinder_client.volumes.extend( + vol_new.id, + '2') + openstack_utils.resource_reaches_status( + self.cinder_client.volumes, + vol_new.id, + expected_status="available", + msg="Extended volume") + + def test_410_cinder_vol_create_backup_delete_restore_pool_inspect(self): + """Create, backup, delete, restore a ceph-backed cinder volume. + + Create, backup, delete, restore a ceph-backed cinder volume, and + inspect ceph cinder pool object count as the volume is created + and deleted. + """ + unit_name = zaza.model.get_lead_unit_name('ceph-mon') + obj_count_samples = [] + pool_size_samples = [] + pools = ceph_utils.get_ceph_pools(unit_name) + expected_pool = 'cinder-ceph' + cinder_ceph_pool = pools[expected_pool] + + # Check ceph cinder pool object count, disk space usage and pool name + logging.info('Checking ceph cinder pool original samples...') + pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample( + unit_name, cinder_ceph_pool) + + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + self.assertEqual(pool_name, expected_pool) + + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(3)): + with attempt: + # Create ceph-backed cinder volume + cinder_vol_name = '{}-410-{}-vol'.format( + self.RESOURCE_PREFIX, attempt.retry_state.attempt_number) + cinder_vol = self.cinder_client.volumes.create( + name=cinder_vol_name, size=1) + openstack_utils.resource_reaches_status( + self.cinder_client.volumes, + cinder_vol.id, + wait_iteration_max_time=180, + stop_after_attempt=15, + expected_status='available', + msg='ceph-backed cinder volume') + + # Back up the volume + # NOTE(lourot): sometimes, especially on Mitaka, the backup + # remains stuck forever in 'creating' state and the volume in + # 'backing-up' state. See lp:1877076 + # Attempting to create another volume and another backup + # usually then succeeds. Release notes and bug trackers show + # that many things have been fixed and are still left to be + # fixed in this area. + # When the backup creation succeeds, it usually does within + # 12 minutes. + vol_backup_name = '{}-410-{}-backup-vol'.format( + self.RESOURCE_PREFIX, attempt.retry_state.attempt_number) + vol_backup = self.cinder_client.backups.create( + cinder_vol.id, name=vol_backup_name) + openstack_utils.resource_reaches_status( + self.cinder_client.backups, + vol_backup.id, + wait_iteration_max_time=180, + stop_after_attempt=15, + expected_status='available', + msg='Backup volume') + + # Delete the volume + openstack_utils.delete_volume(self.cinder_client, cinder_vol.id) + # Restore the volume + self.cinder_client.restores.restore(vol_backup.id) + openstack_utils.resource_reaches_status( + self.cinder_client.backups, + vol_backup.id, + wait_iteration_max_time=180, + stop_after_attempt=15, + expected_status='available', + msg='Restored backup volume') + # Delete the backup + openstack_utils.delete_volume_backup( + self.cinder_client, + vol_backup.id) + openstack_utils.resource_removed( + self.cinder_client.backups, + vol_backup.id, + wait_iteration_max_time=180, + stop_after_attempt=15, + msg="Backup volume") + + # Re-check ceph cinder pool object count and disk usage + logging.info('Checking ceph cinder pool samples ' + 'after volume create...') + pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample( + unit_name, cinder_ceph_pool, self.model_name) + + obj_count_samples.append(obj_count) + pool_size_samples.append(kb_used) + + vols = self.cinder_client.volumes.list() + try: + cinder_vols = [v for v in vols if v.name == cinder_vol_name] + except AttributeError: + cinder_vols = [v for v in vols if + v.display_name == cinder_vol_name] + if not cinder_vols: + # NOTE(hopem): it appears that at some point cinder-backup stopped + # restoring volume metadata properly so revert to default name if + # original is not found + name = "restore_backup_{}".format(vol_backup.id) + try: + cinder_vols = [v for v in vols if v.name == name] + except AttributeError: + cinder_vols = [v for v in vols if v.display_name == name] + + self.assertTrue(cinder_vols) + + cinder_vol = cinder_vols[0] + + # Delete restored cinder volume + openstack_utils.delete_volume(self.cinder_client, cinder_vol.id) + openstack_utils.resource_removed( + self.cinder_client.volumes, + cinder_vol.id, + wait_iteration_max_time=180, + stop_after_attempt=15, + msg="Volume") + + @tenacity.retry(wait=tenacity.wait_exponential(multiplier=10, max=300), + reraise=True, stop=tenacity.stop_after_attempt(10), + retry=tenacity.retry_if_exception_type(AssertionError)) + def _check_get_ceph_pool_sample(obj_count_samples, pool_size_samples): + pool_name, obj_count, kb_used = ceph_utils.get_ceph_pool_sample( + unit_name, cinder_ceph_pool, self.model_name) + + _obj_count_samples = copy.deepcopy(obj_count_samples) + _pool_size_samples = copy.deepcopy(pool_size_samples) + _obj_count_samples.append(obj_count) + _pool_size_samples.append(kb_used) + # Validate ceph cinder pool object count samples over time + original, created, deleted = range(3) + self.assertFalse(_obj_count_samples[created] <= + _obj_count_samples[original]) + self.assertFalse(_obj_count_samples[deleted] >= + _obj_count_samples[created]) + + # Luminous (pike) ceph seems more efficient at disk usage so we + # cannot guarantee the ordering of kb_used + if (openstack_utils.get_os_release() < + openstack_utils.get_os_release('xenial_mitaka')): + self.assertFalse(_pool_size_samples[created] <= + _pool_size_samples[original]) + self.assertFalse(_pool_size_samples[deleted] >= + _pool_size_samples[created]) + + # Final check, ceph cinder pool object count and disk usage + logging.info('Checking ceph cinder pool after volume delete...') + # It sometime takes a short time for removal to be reflected in + # get_ceph_pool_sample so wrap check in tenacity decorator to retry. + _check_get_ceph_pool_sample(obj_count_samples, pool_size_samples) diff --git a/zaza/openstack/charm_tests/cinder_backup_swift/__init__.py b/zaza/openstack/charm_tests/cinder_backup_swift_proxy/__init__.py similarity index 97% rename from zaza/openstack/charm_tests/cinder_backup_swift/__init__.py rename to zaza/openstack/charm_tests/cinder_backup_swift_proxy/__init__.py index 3d7e48c..e093594 100644 --- a/zaza/openstack/charm_tests/cinder_backup_swift/__init__.py +++ b/zaza/openstack/charm_tests/cinder_backup_swift_proxy/__init__.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Collection of code for setting up and testing cinder-backup-swift.""" +"""Collection of code for setting up and testing cinder-backup-swift-proxy.""" diff --git a/zaza/openstack/charm_tests/cinder_backup_swift/setup.py b/zaza/openstack/charm_tests/cinder_backup_swift_proxy/setup.py similarity index 78% rename from zaza/openstack/charm_tests/cinder_backup_swift/setup.py rename to zaza/openstack/charm_tests/cinder_backup_swift_proxy/setup.py index d84ad2e..d4c92e4 100644 --- a/zaza/openstack/charm_tests/cinder_backup_swift/setup.py +++ b/zaza/openstack/charm_tests/cinder_backup_swift_proxy/setup.py @@ -12,14 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Code for configuring cinder-backup-swift.""" +"""Code for configuring cinder-backup-swift-proxy.""" import zaza.model as zaza_model import zaza.openstack.charm_tests.test_utils def configure_cinder_backup(): - """Configure cinder-backup-swift.""" + """Configure cinder-backup-swift-proxy.""" keystone_ip = zaza_model.get_app_ips( 'swift-keystone')[0] swift_ip = zaza_model.get_app_ips( @@ -32,17 +32,18 @@ def configure_cinder_backup(): else: auth_url = 'http://{}:5000/v3'.format(keystone_ip) endpoint_url = 'http://{}:8080/v1/AUTH'.format(swift_ip) - cinder_backup_swift_conf = { + cinder_backup_swift_proxy_conf = { 'endpoint-url': endpoint_url, 'auth-url': auth_url } - juju_service = 'cinder-backup-swift' - zaza_model.set_application_config(juju_service, cinder_backup_swift_conf) + juju_service = 'cinder-backup-swift-proxy' + zaza_model.set_application_config(juju_service, + cinder_backup_swift_proxy_conf) zaza_model.wait_for_agent_status() zaza_model.wait_for_application_states() _singleton = zaza.openstack.charm_tests.test_utils.OpenStackBaseTest() _singleton.setUpClass() - with _singleton.config_change(cinder_backup_swift_conf, - cinder_backup_swift_conf): + with _singleton.config_change(cinder_backup_swift_proxy_conf, + cinder_backup_swift_proxy_conf): # wait for configuration to be applied then return pass diff --git a/zaza/openstack/charm_tests/cinder_backup_swift/tests.py b/zaza/openstack/charm_tests/cinder_backup_swift_proxy/tests.py similarity index 100% rename from zaza/openstack/charm_tests/cinder_backup_swift/tests.py rename to zaza/openstack/charm_tests/cinder_backup_swift_proxy/tests.py diff --git a/zaza/openstack/charm_tests/designate/__init__.py b/zaza/openstack/charm_tests/designate/__init__.py new file mode 100644 index 0000000..78c15a3 --- /dev/null +++ b/zaza/openstack/charm_tests/designate/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing designate.""" diff --git a/zaza/openstack/charm_tests/designate/tests.py b/zaza/openstack/charm_tests/designate/tests.py new file mode 100644 index 0000000..78a5f31 --- /dev/null +++ b/zaza/openstack/charm_tests/designate/tests.py @@ -0,0 +1,328 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate designate testing.""" +import logging +import unittest +import tenacity +import subprocess + +import designateclient.v1.domains as domains +import designateclient.v1.records as records +import designateclient.v1.servers as servers + +import zaza.model +import zaza.utilities.juju as juju_utils +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.openstack as openstack_utils +import zaza.openstack.charm_tests.designate.utils as designate_utils +import zaza.charm_lifecycle.utils as lifecycle_utils + + +class BaseDesignateTest(test_utils.OpenStackBaseTest): + """Base for Designate charm tests.""" + + @classmethod + def setUpClass(cls, application_name=None, model_alias=None): + """Run class setup for running Designate charm operation tests.""" + application_name = application_name or "designate" + model_alias = model_alias or "" + super(BaseDesignateTest, cls).setUpClass(application_name, model_alias) + os_release = openstack_utils.get_os_release + + if os_release() >= os_release('bionic_rocky'): + cls.designate_svcs = [ + 'designate-agent', 'designate-api', 'designate-central', + 'designate-mdns', 'designate-worker', 'designate-sink', + 'designate-producer', + ] + else: + cls.designate_svcs = [ + 'designate-agent', 'designate-api', 'designate-central', + 'designate-mdns', 'designate-pool-manager', 'designate-sink', + 'designate-zone-manager', + ] + + # Get keystone session + cls.post_xenial_queens = os_release() >= os_release('xenial_queens') + overcloud_auth = openstack_utils.get_overcloud_auth() + keystone = openstack_utils.get_keystone_client(overcloud_auth) + + keystone_session = openstack_utils.get_overcloud_keystone_session() + if cls.post_xenial_queens: + cls.designate = openstack_utils.get_designate_session_client( + session=keystone_session + ) + cls.domain_list = cls.designate.zones.list + cls.domain_delete = cls.designate.zones.delete + cls.domain_create = cls.designate.zones.create + else: + # Authenticate admin with designate endpoint + designate_ep = keystone.service_catalog.url_for( + service_type='dns', + interface='publicURL') + keystone_ep = keystone.service_catalog.url_for( + service_type='identity', + interface='publicURL') + cls.designate = openstack_utils.get_designate_session_client( + version=1, + auth_url=keystone_ep, + token=keystone_session.get_token(), + tenant_name="admin", + endpoint=designate_ep) + cls.domain_list = cls.designate.domains.list + cls.domain_delete = cls.designate.domains.delete + cls.domain_create = cls.designate.domains.create + cls.server_list = cls.designate.servers.list + cls.server_create = cls.designate.servers.create + cls.server_delete = cls.designate.servers.delete + + +class DesignateAPITests(BaseDesignateTest): + """Tests interact with designate api.""" + + TEST_DOMAIN = 'amuletexample.com.' + TEST_NS1_RECORD = 'ns1.{}'.format(TEST_DOMAIN) + TEST_NS2_RECORD = 'ns2.{}'.format(TEST_DOMAIN) + TEST_WWW_RECORD = "www.{}".format(TEST_DOMAIN) + TEST_RECORD = {TEST_WWW_RECORD: '10.0.0.23'} + + def _get_server_id(self, server_name=None, server_id=None): + for srv in self.server_list(): + if isinstance(srv, dict): + if srv['id'] == server_id or srv['name'] == server_name: + return srv['id'] + elif srv.name == server_name or srv.id == server_id: + return srv.id + return None + + def _wait_on_server_gone(self, server_id): + @tenacity.retry( + wait=tenacity.wait_exponential(multiplier=1, min=5, max=10), + reraise=True + ) + def wait(): + logging.debug('Waiting for server %s to disappear', server_id) + if self._get_server_id(server_id=server_id): + raise Exception("Server Exists") + self.server_delete(server_id) + return wait() + + def test_400_server_creation(self): + """Simple api calls to create a server.""" + # Designate does not allow the last server to be deleted so ensure + # that ns1 is always present + if self.post_xenial_queens: + logging.info('Skipping server creation tests for Queens and above') + return + + if not self._get_server_id(server_name=self.TEST_NS1_RECORD): + server = servers.Server(name=self.TEST_NS1_RECORD) + new_server = self.server_create(server) + self.assertIsNotNone(new_server) + + logging.debug('Checking if server exists before trying to create it') + old_server_id = self._get_server_id(server_name=self.TEST_NS2_RECORD) + if old_server_id: + logging.debug('Deleting old server') + self._wait_on_server_gone(old_server_id) + + logging.debug('Creating new server') + server = servers.Server(name=self.TEST_NS2_RECORD) + new_server = self.server_create(server) + self.assertIsNotNone(new_server, "Failed to Create Server") + self._wait_on_server_gone(self._get_server_id(self.TEST_NS2_RECORD)) + + def _get_domain_id(self, domain_name=None, domain_id=None): + for dom in self.domain_list(): + if isinstance(dom, dict): + if dom['id'] == domain_id or dom['name'] == domain_name: + return dom['id'] + elif dom.id == domain_id or dom.name == domain_name: + return dom.id + return None + + def _wait_on_domain_gone(self, domain_id): + @tenacity.retry( + wait=tenacity.wait_exponential(multiplier=1, min=5, max=10), + reraise=True + ) + def wait(): + logging.debug('Waiting for domain %s to disappear', domain_id) + if self._get_domain_id(domain_id=domain_id): + raise Exception("Domain Exists") + self.domain_delete(domain_id) + wait() + + @tenacity.retry( + wait=tenacity.wait_exponential(multiplier=1, min=5, max=10), + reraise=True + ) + def _wait_to_resolve_test_record(self): + dns_ip = juju_utils.get_relation_from_unit( + 'designate/0', + 'designate-bind/0', + 'dns-backend' + ).get('private-address') + + logging.info('Waiting for dns record to propagate @ {}'.format(dns_ip)) + lookup_cmd = [ + 'dig', '+short', '@{}'.format(dns_ip), + self.TEST_WWW_RECORD] + cmd_out = subprocess.check_output( + lookup_cmd, universal_newlines=True).rstrip() + if not self.TEST_RECORD[self.TEST_WWW_RECORD] == cmd_out: + raise Exception("Record Doesn't Exist") + + def test_400_domain_creation(self): + """Simple api calls to create domain.""" + logging.debug('Checking if domain exists before trying to create it') + old_dom_id = self._get_domain_id(domain_name=self.TEST_DOMAIN) + if old_dom_id: + logging.debug('Deleting old domain') + self._wait_on_domain_gone(old_dom_id) + + logging.debug('Creating new domain') + domain = domains.Domain( + name=self.TEST_DOMAIN, + email="fred@amuletexample.com") + + if self.post_xenial_queens: + new_domain = self.domain_create( + name=domain.name, email=domain.email) + else: + new_domain = self.domain_create(domain) + self.assertIsNotNone(new_domain) + + logging.debug('Creating new test record') + _record = records.Record( + name=self.TEST_WWW_RECORD, + type="A", + data=self.TEST_RECORD[self.TEST_WWW_RECORD]) + + if self.post_xenial_queens: + domain_id = new_domain['id'] + self.designate.recordsets.create( + domain_id, _record.name, _record.type, [_record.data]) + else: + domain_id = new_domain.id + self.designate.records.create(domain_id, _record) + + self._wait_to_resolve_test_record() + + logging.debug('Tidy up delete test record') + self._wait_on_domain_gone(domain_id) + logging.debug('OK') + + +class DesignateCharmTests(BaseDesignateTest): + """Designate charm restart and pause tests.""" + + def test_900_restart_on_config_change(self): + """Checking restart happens on config change. + + Change debug mode and assert that change propagates to the correct + file and that services are restarted as a result + """ + # Services which are expected to restart upon config change, + # and corresponding config files affected by the change + conf_file = '/etc/designate/designate.conf' + + # Make config change, check for service restarts + self.restart_on_changed_debug_oslo_config_file( + conf_file, + self.designate_svcs, + ) + + def test_910_pause_and_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + """ + with self.pause_resume( + self.designate_svcs, + pgrep_full=False): + logging.info("Testing pause resume") + + +class DesignateTests(DesignateAPITests, DesignateCharmTests): + """Collection of all Designate test classes.""" + + pass + + +class DesignateBindExpand(BaseDesignateTest): + """Test expanding and shrinking bind.""" + + TEST_DOMAIN = 'zazabindtesting.com.' + TEST_NS1_RECORD = 'ns1.{}'.format(TEST_DOMAIN) + TEST_NS2_RECORD = 'ns2.{}'.format(TEST_DOMAIN) + TEST_WWW_RECORD = "www.{}".format(TEST_DOMAIN) + TEST_RECORD = {TEST_WWW_RECORD: '10.0.0.24'} + + def test_expand_and_contract(self): + """Test expanding and shrinking bind.""" + test_config = lifecycle_utils.get_charm_config(fatal=False) + states = test_config.get("target_deploy_status", {}) + if not self.post_xenial_queens: + raise unittest.SkipTest("Test not supported before Queens") + + domain = designate_utils.create_or_return_zone( + self.designate, + name=self.TEST_DOMAIN, + email="test@zaza.com") + + designate_utils.create_or_return_recordset( + self.designate, + domain['id'], + 'www', + 'A', + [self.TEST_RECORD[self.TEST_WWW_RECORD]]) + + # Test record is in bind and designate + designate_utils.check_dns_entry( + self.designate, + self.TEST_RECORD[self.TEST_WWW_RECORD], + self.TEST_DOMAIN, + record_name=self.TEST_WWW_RECORD) + + logging.info('Adding a designate-bind unit') + zaza.model.add_unit('designate-bind', wait_appear=True) + zaza.model.block_until_all_units_idle() + zaza.model.wait_for_application_states(states=states) + + logging.info('Performing DNS lookup on all units') + designate_utils.check_dns_entry( + self.designate, + self.TEST_RECORD[self.TEST_WWW_RECORD], + self.TEST_DOMAIN, + record_name=self.TEST_WWW_RECORD) + + units = zaza.model.get_status().applications['designate-bind']['units'] + doomed_unit = sorted(units.keys())[0] + logging.info('Removing {}'.format(doomed_unit)) + zaza.model.destroy_unit( + 'designate-bind', + doomed_unit, + wait_disappear=True) + zaza.model.block_until_all_units_idle() + zaza.model.wait_for_application_states(states=states) + + logging.info('Performing DNS lookup on all units') + designate_utils.check_dns_entry( + self.designate, + self.TEST_RECORD[self.TEST_WWW_RECORD], + self.TEST_DOMAIN, + record_name=self.TEST_WWW_RECORD) diff --git a/zaza/openstack/charm_tests/designate/utils.py b/zaza/openstack/charm_tests/designate/utils.py new file mode 100644 index 0000000..bde69ff --- /dev/null +++ b/zaza/openstack/charm_tests/designate/utils.py @@ -0,0 +1,205 @@ +"""Utilities for interacting with designate.""" + +import dns.resolver +import logging +import tenacity + +import designateclient.exceptions + +import zaza.model + + +def create_or_return_zone(client, name, email): + """Create zone or return matching existing zone. + + :param designate_client: Client to query designate + :type designate_client: designateclient.v2.Client + :param name: Name of zone + :type name: str + :param email: Email address to associate with zone. + :type email: str + :returns: Zone + :rtype: designateclient.v2.zones.Zone + """ + try: + zone = client.zones.create( + name=name, + email=email) + except designateclient.exceptions.Conflict: + logging.info('{} zone already exists.'.format(name)) + zones = [z for z in client.zones.list() if z['name'] == name] + assert len(zones) == 1, "Wrong number of zones found {}".format(zones) + zone = zones[0] + return zone + + +def create_or_return_recordset(client, zone_id, sub_domain, record_type, data): + """Create recordset or return matching existing recordset. + + :param designate_client: Client to query designate + :type designate_client: designateclient.v2.Client + :param zone_id: uuid of zone + :type zone_id: str + :param sub_domain: Subdomain to associate records with + :type sub_domain: str + :param data: Dictionary of entries eg {'www.test.com': '10.0.0.24'} + :type data: dict + :returns: RecordSet + :rtype: designateclient.v2.recordsets.RecordSet + """ + try: + rs = client.recordsets.create( + zone_id, + sub_domain, + record_type, + data) + except designateclient.exceptions.Conflict: + logging.info('{} record already exists.'.format(data)) + for r in client.recordsets.list(zone_id): + if r['name'].split('.')[0] == sub_domain: + rs = r + return rs + + +def get_designate_zone_objects(designate_client, domain_name=None, + domain_id=None): + """Get all domains matching a given domain_name or domain_id. + + :param designate_client: Client to query designate + :type designate_client: designateclient.v2.Client + :param domain_name: Name of domain to lookup + :type domain_name: str + :param domain_id: UUID of domain to lookup + :type domain_id: str + :returns: List of Domain objects matching domain_name or domain_id + :rtype: [designateclient.v2.domains.Domain,] + """ + all_zones = designate_client.zones.list() + a = [z for z in all_zones + if z['name'] == domain_name or z['id'] == domain_id] + return a + + +def get_designate_domain_object(designate_client, domain_name): + """Get the one and only domain matching the given domain_name. + + :param designate_client: Client to query designate + :type designate_client: designateclient.v2.Client + :param domain_name: Name of domain to lookup + :type domain_name:str + :returns: Domain with name domain_name + :rtype: designateclient.v2.domains.Domain + :raises: AssertionError + """ + dns_zone_id = get_designate_zone_objects(designate_client, + domain_name=domain_name) + msg = "Found {} domains for {}".format( + len(dns_zone_id), + domain_name) + assert len(dns_zone_id) == 1, msg + return dns_zone_id[0] + + +def get_designate_dns_records(designate_client, domain_name, ip): + """Look for records in designate that match the given ip. + + :param designate_client: Client to query designate + :type designate_client: designateclient.v2.Client + :param domain_name: Name of domain to lookup + :type domain_name:str + :returns: List of Record objects matching matching IP address + :rtype: [designateclient.v2.records.Record,] + """ + dns_zone = get_designate_domain_object(designate_client, domain_name) + return [r for r in designate_client.recordsets.list(dns_zone['id']) + if r['records'] == ip] + + +def check_dns_record_exists(dns_server_ip, query_name, expected_ip, + retry_count=3): + """Lookup a DNS record against the given dns server address. + + :param dns_server_ip: IP address to run query against + :type dns_server_ip: str + :param query_name: Record to lookup + :type query_name: str + :param expected_ip: IP address expected to be associated with record. + :type expected_ip: str + :param retry_count: Number of times to retry query. Useful if waiting + for record to propagate. + :type retry_count: int + :raises: AssertionError + """ + my_resolver = dns.resolver.Resolver() + my_resolver.nameservers = [dns_server_ip] + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(retry_count), + wait=tenacity.wait_exponential(multiplier=1, min=2, max=10), + reraise=True): + with attempt: + logging.info("Checking record {} against {}".format( + query_name, + dns_server_ip)) + answers = my_resolver.query(query_name) + for rdata in answers: + logging.info("Checking address returned by {} is correct".format( + dns_server_ip)) + assert str(rdata) == expected_ip + + +def check_dns_entry(des_client, ip, domain, record_name): + """Check that record for ip is in designate and in bind. + + :param ip: IP address to lookup + :type ip: str + :param domain_name: Domain to look for record in + :type domain_name:str + :param record_name: record name + :type record_name: str + """ + check_dns_entry_in_designate(des_client, [ip], domain, + record_name=record_name) + check_dns_entry_in_bind(ip, record_name) + + +def check_dns_entry_in_designate(des_client, ip, domain, record_name=None): + """Look for records in designate that match the given ip domain. + + :param designate_client: Client to query designate + :type designate_client: designateclient.v2.Client + :param ip: IP address to lookup in designate + :type ip: str + :param domain_name: Name of domain to lookup + :type domain_name:str + :param record_name: Retrieved record should have this name + :type record_name: str + :raises: AssertionError + """ + records = get_designate_dns_records(des_client, domain, ip) + assert records, "Record not found for {} in designate".format(ip) + logging.info('Found record in {} for {} in designate'.format(domain, ip)) + + if record_name: + recs = [r for r in records if r['name'] == record_name] + assert recs, "No DNS entry name matches expected name {}".format( + record_name) + logging.info('Found record in {} for {} in designate'.format( + domain, + record_name)) + + +def check_dns_entry_in_bind(ip, record_name, model_name=None): + """Check that record for ip address is in bind. + + :param ip: IP address to lookup + :type ip: str + :param record_name: record name + :type record_name: str + """ + for addr in zaza.model.get_app_ips('designate-bind', + model_name=model_name): + logging.info("Checking {} is {} against ({})".format( + record_name, + ip, + addr)) + check_dns_record_exists(addr, record_name, ip, retry_count=6) diff --git a/zaza/openstack/charm_tests/dragent/configure.py b/zaza/openstack/charm_tests/dragent/configure.py index 6d6e075..f509609 100644 --- a/zaza/openstack/charm_tests/dragent/configure.py +++ b/zaza/openstack/charm_tests/dragent/configure.py @@ -16,6 +16,8 @@ """Setup for BGP deployments.""" +import logging +import zaza.model from zaza.openstack.configure import ( network, bgp_speaker, @@ -86,6 +88,15 @@ def setup(): # Confugre the overcloud network network.setup_sdn(network_config, keystone_session=keystone_session) + + # LP Bugs #1784083 and #1841459, require a late restart of the + # neutron-bgp-dragent service + logging.warning("Due to LP Bugs #1784083 and #1841459, we require a late " + "restart of the neutron-bgp-dragent service before " + "setting up BGP.") + for unit in zaza.model.get_units("neutron-dynamic-routing"): + generic_utils.systemctl(unit, "neutron-bgp-dragent", command="restart") + # Configure BGP bgp_speaker.setup_bgp_speaker( peer_application_name=DEFAULT_PEER_APPLICATION_NAME, diff --git a/zaza/openstack/charm_tests/dragent/test.py b/zaza/openstack/charm_tests/dragent/test.py index de4e230..90aacb9 100644 --- a/zaza/openstack/charm_tests/dragent/test.py +++ b/zaza/openstack/charm_tests/dragent/test.py @@ -61,16 +61,17 @@ def test_bgp_routes(peer_application_name="quagga", keystone_session=None): # This test may run immediately after configuration. It may take time for # routes to propogate via BGP. Do a binary backoff. @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60), - reraise=True, stop=tenacity.stop_after_attempt(8)) + reraise=True, stop=tenacity.stop_after_attempt(10)) def _assert_cidr_in_peer_routing_table(peer_unit, cidr): logging.debug("Checking for {} on BGP peer {}" .format(cidr, peer_unit)) # Run show ip route bgp on BGP peer routes = juju_utils.remote_run( peer_unit, remote_cmd='vtysh -c "show ip route bgp"') - logging.debug(routes) + logging.info(routes) assert cidr in routes, ( - "CIDR, {}, not found in BGP peer's routing table" .format(cidr)) + "CIDR, {}, not found in BGP peer's routing table: {}" + .format(cidr, routes)) _assert_cidr_in_peer_routing_table(peer_unit, private_cidr) logging.info("Private subnet CIDR, {}, found in routing table" diff --git a/zaza/openstack/charm_tests/glance/setup.py b/zaza/openstack/charm_tests/glance/setup.py index c992917..367c980 100644 --- a/zaza/openstack/charm_tests/glance/setup.py +++ b/zaza/openstack/charm_tests/glance/setup.py @@ -14,10 +14,16 @@ """Code for configuring glance.""" +import json import logging + +import boto3 +import zaza.model as model import zaza.openstack.utilities.openstack as openstack_utils +import zaza.utilities.deployment_env as deployment_env CIRROS_IMAGE_NAME = "cirros" +CIRROS_ALT_IMAGE_NAME = "cirros_alt" LTS_RELEASE = "bionic" LTS_IMAGE_NAME = "bionic" @@ -30,7 +36,37 @@ def basic_setup(): """ -def add_image(image_url, glance_client=None, image_name=None, tags=[]): +def _get_default_glance_client(): + """Create default Glance client using overcloud credentials.""" + keystone_session = openstack_utils.get_overcloud_keystone_session() + glance_client = openstack_utils.get_glance_session_client(keystone_session) + return glance_client + + +def get_stores_info(glance_client=None): + """Retrieve glance backing store info. + + :param glance_client: Authenticated glanceclient + :type glance_client: glanceclient.Client + """ + glance_client = glance_client or _get_default_glance_client() + stores = glance_client.images.get_stores_info().get("stores", []) + return stores + + +def get_store_ids(glance_client=None): + """Retrieve glance backing store ids. + + :param glance_client: Authenticated glanceclient + :type glance_client: glanceclient.Client + """ + stores = get_stores_info(glance_client) + return [store["id"] for store in stores] + + +def add_image(image_url, glance_client=None, image_name=None, tags=[], + properties=None, backend=None, disk_format='qcow2', + visibility='public', container_format='bare'): """Retrieve image from ``image_url`` and add it to glance. :param image_url: Retrievable URL with image data @@ -41,11 +77,17 @@ def add_image(image_url, glance_client=None, image_name=None, tags=[]): :type image_name: str :param tags: List of tags to add to image :type tags: list of str + :param properties: Properties to add to image + :type properties: dict """ - if not glance_client: - keystone_session = openstack_utils.get_overcloud_keystone_session() - glance_client = openstack_utils.get_glance_session_client( - keystone_session) + glance_client = glance_client or _get_default_glance_client() + if backend is not None: + stores = get_store_ids(glance_client) + if backend not in stores: + raise ValueError("Invalid backend: %(backend)s " + "(available: %(available)s)" % { + "backend": backend, + "available": ", ".join(stores)}) if image_name: image = openstack_utils.get_images_by_name( glance_client, image_name) @@ -59,7 +101,12 @@ def add_image(image_url, glance_client=None, image_name=None, tags=[]): glance_client, image_url, image_name, - tags=tags) + tags=tags, + properties=properties, + backend=backend, + disk_format=disk_format, + visibility=visibility, + container_format=container_format) def add_cirros_image(glance_client=None, image_name=None): @@ -77,7 +124,20 @@ def add_cirros_image(glance_client=None, image_name=None): image_name=image_name) -def add_lts_image(glance_client=None, image_name=None, release=None): +def add_cirros_alt_image(glance_client=None, image_name=None): + """Add alt cirros image to the current deployment. + + :param glance: Authenticated glanceclient + :type glance: glanceclient.Client + :param image_name: Label for the image in glance + :type image_name: str + """ + image_name = image_name or CIRROS_ALT_IMAGE_NAME + add_cirros_image(glance_client, image_name) + + +def add_lts_image(glance_client=None, image_name=None, release=None, + properties=None): """Add an Ubuntu LTS image to the current deployment. :param glance: Authenticated glanceclient @@ -86,12 +146,77 @@ def add_lts_image(glance_client=None, image_name=None, release=None): :type image_name: str :param release: Name of ubuntu release. :type release: str + :param properties: Custom image properties + :type properties: dict """ + deploy_ctxt = deployment_env.get_deployment_context() + image_arch = deploy_ctxt.get('TEST_IMAGE_ARCH', 'amd64') + arch_image_properties = { + 'arm64': {'hw_firmware_type': 'uefi'}, + 'ppc64el': {'architecture': 'ppc64'}} + properties = properties or arch_image_properties.get(image_arch) + logging.info("Image architecture set to {}".format(image_arch)) image_name = image_name or LTS_IMAGE_NAME release = release or LTS_RELEASE image_url = openstack_utils.find_ubuntu_image( release=release, - arch='amd64') + arch=image_arch) add_image(image_url, glance_client=glance_client, - image_name=image_name) + image_name=image_name, + properties=properties) + + +def configure_external_s3_backend(): + """Set up Ceph-radosgw as an external S3 backend for Glance.""" + logging.info("Creating a test S3 user and credentials for Glance") + username, displayname = "zaza-glance-test", "Zaza Glance Test User" + cmd = "radosgw-admin user create --uid='{}' --display-name='{}'".format( + username, displayname + ) + results = model.run_on_leader("ceph-mon", cmd) + stdout = json.loads(results["stdout"]) + keys = stdout["keys"][0] + access_key, secret_key = keys["access_key"], keys["secret_key"] + + logging.info("Getting S3 endpoint URL of Radosgw from Keystone") + keystone_auth = openstack_utils.get_overcloud_auth() + keystone_client = openstack_utils.get_keystone_client(keystone_auth) + endpoint_url = keystone_client.session.get_endpoint( + service_type="s3", + interface="public", + region="RegionOne", + ) + + logging.info("Creating a test S3 bucket for Glance") + bucket_name = "zaza-glance-s3-test" + s3_client = boto3.client( + "s3", + endpoint_url=endpoint_url, + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + ) + s3_client.create_bucket(Bucket=bucket_name) + + logging.info("Updating Glance configs with S3 endpoint information") + model.set_application_config( + "glance", + { + "s3-store-host": endpoint_url, + "s3-store-access-key": access_key, + "s3-store-secret-key": secret_key, + "s3-store-bucket": bucket_name, + }, + ) + model.wait_for_agent_status() + + logging.info("Waiting for units to reach target states") + model.wait_for_application_states( + states={ + "glance": { + "workload-status": "active", + "workload-status-message": "Unit is ready", + } + } + ) + model.block_until_all_units_idle() diff --git a/zaza/openstack/charm_tests/glance/tests.py b/zaza/openstack/charm_tests/glance/tests.py index 001f17f..a6da291 100644 --- a/zaza/openstack/charm_tests/glance/tests.py +++ b/zaza/openstack/charm_tests/glance/tests.py @@ -18,8 +18,10 @@ import logging -import zaza.openstack.utilities.openstack as openstack_utils +import boto3 +import zaza.model as model import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.openstack as openstack_utils class GlanceTest(test_utils.OpenStackBaseTest): @@ -44,7 +46,7 @@ class GlanceTest(test_utils.OpenStackBaseTest): def test_411_set_disk_format(self): """Change disk format and check. - Change disk format and assert then change propagates to the correct + Change disk format and assert that change propagates to the correct file and that services are restarted as a result """ # Expected default and alternate values @@ -67,6 +69,50 @@ class GlanceTest(test_utils.OpenStackBaseTest): {'image_format': {'disk_formats': ['qcow2']}}, ['glance-api']) + def test_412_image_conversion(self): + """Check image-conversion config. + + When image-conversion config is enabled glance will convert images + to raw format, this is only performed for interoperable image import + docs.openstack.org/glance/train/admin/interoperable-image-import.html + image conversion is done at server-side for better image handling + """ + current_release = openstack_utils.get_os_release() + bionic_stein = openstack_utils.get_os_release('bionic_stein') + if current_release < bionic_stein: + self.skipTest('image-conversion config is supported since ' + 'bionic_stein or newer versions') + + with self.config_change({'image-conversion': 'false'}, + {'image-conversion': 'true'}): + image_url = openstack_utils.find_cirros_image(arch='x86_64') + image = openstack_utils.create_image( + self.glance_client, + image_url, + 'cirros-test-import', + force_import=True) + + disk_format = self.glance_client.images.get(image.id).disk_format + self.assertEqual('raw', disk_format) + + def test_900_restart_on_config_change(self): + """Checking restart happens on config change.""" + # Config file affected by juju set config change + conf_file = '/etc/glance/glance-api.conf' + + # Services which are expected to restart upon config change + services = {'glance-api': conf_file} + current_release = openstack_utils.get_os_release() + bionic_stein = openstack_utils.get_os_release('bionic_stein') + if current_release < bionic_stein: + services.update({'glance-registry': conf_file}) + + # Make config change, check for service restarts + logging.info('changing debug config') + self.restart_on_changed_debug_oslo_config_file( + conf_file, + services) + def test_901_pause_resume(self): """Run pause and resume tests. @@ -74,3 +120,105 @@ class GlanceTest(test_utils.OpenStackBaseTest): they are started """ self.pause_resume(['glance-api']) + + +class GlanceCephRGWBackendTest(test_utils.OpenStackBaseTest): + """Encapsulate glance tests using the Ceph RGW backend. + + It validates the Ceph RGW backend in glance, which uses the Swift API. + """ + + @classmethod + def setUpClass(cls): + """Run class setup for running glance tests.""" + super(GlanceCephRGWBackendTest, cls).setUpClass() + + swift_session = openstack_utils.get_keystone_session_from_relation( + 'ceph-radosgw') + cls.swift = openstack_utils.get_swift_session_client( + swift_session) + cls.glance_client = openstack_utils.get_glance_session_client( + cls.keystone_session) + + def test_100_create_image(self): + """Create an image and do a simple validation of it. + + The OpenStack Swift API is used to do the validation, since the Ceph + Rados Gateway serves an API which is compatible with that. + """ + image_name = 'zaza-ceph-rgw-image' + openstack_utils.create_image( + glance=self.glance_client, + image_url=openstack_utils.find_cirros_image(arch='x86_64'), + image_name=image_name, + backend='swift') + headers, containers = self.swift.get_account() + self.assertEqual(len(containers), 1) + container_name = containers[0].get('name') + headers, objects = self.swift.get_container(container_name) + images = openstack_utils.get_images_by_name( + self.glance_client, + image_name) + self.assertEqual(len(images), 1) + image = images[0] + total_bytes = 0 + for ob in objects: + if '{}-'.format(image['id']) in ob['name']: + total_bytes = total_bytes + int(ob['bytes']) + logging.info( + 'Checking glance image size {} matches swift ' + 'image size {}'.format(image['size'], total_bytes)) + self.assertEqual(image['size'], total_bytes) + openstack_utils.delete_image(self.glance_client, image['id']) + + +class GlanceExternalS3Test(test_utils.OpenStackBaseTest): + """Encapsulate glance tests using an external S3 backend.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running glance tests with S3 backend.""" + super(GlanceExternalS3Test, cls).setUpClass() + cls.glance_client = openstack_utils.get_glance_session_client( + cls.keystone_session + ) + + configs = model.get_application_config("glance") + cls.s3_store_host = configs["s3-store-host"]["value"] + cls.s3_store_access_key = configs["s3-store-access-key"]["value"] + cls.s3_store_secret_key = configs["s3-store-secret-key"]["value"] + cls.s3_store_bucket = configs["s3-store-bucket"]["value"] + + def test_100_create_delete_image(self): + """Create an image and do a simple validation of it. + + Validate the size of the image in both Glance API and actual S3 bucket. + """ + image_name = "zaza-s3-test-image" + openstack_utils.create_image( + glance=self.glance_client, + image_url=openstack_utils.find_cirros_image(arch="x86_64"), + image_name=image_name, + backend="s3", + ) + images = openstack_utils.get_images_by_name( + self.glance_client, image_name + ) + self.assertEqual(len(images), 1) + image = images[0] + + s3_client = boto3.client( + "s3", + endpoint_url=self.s3_store_host, + aws_access_key_id=self.s3_store_access_key, + aws_secret_access_key=self.s3_store_secret_key, + ) + response = s3_client.head_object( + Bucket=self.s3_store_bucket, Key=image["id"] + ) + logging.info( + "Checking glance image size {} matches S3 object's ContentLength " + "{}".format(image["size"], response["ContentLength"]) + ) + self.assertEqual(image["size"], response["ContentLength"]) + openstack_utils.delete_image(self.glance_client, image["id"]) diff --git a/zaza/openstack/charm_tests/glance_simplestreams_sync/__init__.py b/zaza/openstack/charm_tests/glance_simplestreams_sync/__init__.py new file mode 100644 index 0000000..7667e52 --- /dev/null +++ b/zaza/openstack/charm_tests/glance_simplestreams_sync/__init__.py @@ -0,0 +1,16 @@ + +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing glance-simplestreams-sync.""" diff --git a/zaza/openstack/charm_tests/glance_simplestreams_sync/setup.py b/zaza/openstack/charm_tests/glance_simplestreams_sync/setup.py new file mode 100644 index 0000000..96648af --- /dev/null +++ b/zaza/openstack/charm_tests/glance_simplestreams_sync/setup.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Code for configuring glance-simplestreams-sync.""" + +import logging +import tenacity +import pprint + +import zaza.model as zaza_model +import zaza.openstack.utilities.generic as generic_utils +import zaza.openstack.utilities.openstack as openstack_utils + + +def _get_catalog(): + """Retrieve the Keystone service catalog. + + :returns: The raw Keystone service catalog. + :rtype: List[Dict] + """ + keystone_session = openstack_utils.get_overcloud_keystone_session() + keystone_client = openstack_utils.get_keystone_session_client( + keystone_session) + + token = keystone_session.get_token() + token_data = keystone_client.tokens.get_token_data(token) + + if 'catalog' not in token_data['token']: + raise ValueError('catalog not in token data: "{}"' + .format(pprint.pformat(token_data))) + + return token_data['token']['catalog'] + + +def sync_images(): + """Run image sync using an action. + + Execute an initial image sync using an action to ensure that the + cloud is populated with images at the right point in time during + deployment. + """ + logging.info("Synchronising images using glance-simplestreams-sync") + + catalog = None + try: + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(3), + wait=tenacity.wait_exponential( + multiplier=1, min=2, max=10), + reraise=True): + with attempt: + # Proactively retrieve the Keystone service catalog so that we + # can log it in the event of a failure. + catalog = _get_catalog() + generic_utils.assertActionRanOK( + zaza_model.run_action_on_leader( + "glance-simplestreams-sync", + "sync-images", + raise_on_failure=True, + action_params={}, + ) + ) + except Exception: + logging.info('Contents of Keystone service catalog: "{}"' + .format(pprint.pformat(catalog))) + raise diff --git a/zaza/openstack/charm_tests/glance_simplestreams_sync/tests.py b/zaza/openstack/charm_tests/glance_simplestreams_sync/tests.py new file mode 100644 index 0000000..0c0d472 --- /dev/null +++ b/zaza/openstack/charm_tests/glance_simplestreams_sync/tests.py @@ -0,0 +1,127 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate glance-simplestreams-sync testing.""" +import json +import logging +import requests +import tenacity + +import zaza.model as zaza_model +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.openstack as openstack_utils + + +@tenacity.retry( + retry=tenacity.retry_if_result(lambda images: len(images) < 4), + wait=tenacity.wait_fixed(6), # interval between retries + stop=tenacity.stop_after_attempt(100)) # retry times +def retry_image_sync(glance_client): + """Wait for image sync with retry.""" + # convert generator to list + return list(glance_client.images.list()) + + +@tenacity.retry( + retry=tenacity.retry_if_exception_type(json.decoder.JSONDecodeError), + wait=tenacity.wait_fixed(10), reraise=True, + stop=tenacity.stop_after_attempt(10)) +def get_product_streams(url): + """Get product streams json data with retry.""" + # There is a race between the images being available in glance and any + # metadata being written. Use tenacity to avoid this race. + client = requests.session() + json_data = client.get(url, verify=openstack_utils.get_cacert()).text + return json.loads(json_data) + + +class GlanceSimpleStreamsSyncTest(test_utils.OpenStackBaseTest): + """Glance Simple Streams Sync Test.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running glance simple streams sync tests.""" + super(GlanceSimpleStreamsSyncTest, cls).setUpClass() + # dict of OS_* env vars + overcloud_auth = openstack_utils.get_overcloud_auth() + cls.keystone_client = openstack_utils.get_keystone_client( + overcloud_auth) + cls.glance_client = openstack_utils.get_glance_session_client( + cls.keystone_session) + + def test_010_wait_for_image_sync(self): + """Wait for images to be synced. Expect at least four.""" + self.assertTrue(retry_image_sync(self.glance_client)) + + def test_050_gss_permissions_regression_check_lp1611987(self): + """Assert the intended file permissions on gss config files. + + refer: https://bugs.launchpad.net/bugs/1611987 + """ + file_paths = [ + '/etc/glance-simplestreams-sync/identity.yaml', + '/etc/glance-simplestreams-sync/mirrors.yaml', + '/var/log/glance-simplestreams-sync.log', + ] + expected_perms = '640' + + application = 'glance-simplestreams-sync' + for unit in zaza_model.get_units(application): + for file_path in file_paths: + cmd = 'stat -c %a {}'.format(file_path) + result = zaza_model.run_on_unit(unit.name, cmd, timeout=30) + # {'Code': '', 'Stderr': '', 'Stdout': '644\n'} + perms = result.get('Stdout', '').strip() + self.assertEqual(perms, expected_perms) + logging.debug( + 'Permissions on {}: {}'.format(file_path, perms)) + + def test_110_local_product_stream(self): + """Verify that the local product stream is accessible and has data.""" + logging.debug('Checking local product streams...') + expected_images = [ + 'com.ubuntu.cloud:server:14.04:amd64', + 'com.ubuntu.cloud:server:16.04:amd64', + 'com.ubuntu.cloud:server:18.04:amd64', + 'com.ubuntu.cloud:server:20.04:amd64', + ] + uri = "streams/v1/auto.sync.json" + + # There is a race between the images being available in glance and the + # metadata being written for each image. Use tenacity to avoid this + # race and make the test idempotent. + @tenacity.retry( + retry=tenacity.retry_if_exception_type( + (AssertionError, KeyError) + ), + wait=tenacity.wait_fixed(10), reraise=True, + stop=tenacity.stop_after_attempt(25)) + def _check_local_product_streams(expected_images): + # Refresh from catalog as URL may change if swift in use. + ps_interface = self.keystone_client.service_catalog.url_for( + service_type='product-streams', interface='publicURL' + ) + url = "{}/{}".format(ps_interface, uri) + logging.info('Retrieving product stream information' + ' from {}'.format(url)) + product_streams = get_product_streams(url) + logging.debug(product_streams) + images = product_streams["products"] + + for image in expected_images: + self.assertIn(image, images) + + _check_local_product_streams(expected_images) + + logging.debug("Local product stream successful") diff --git a/zaza/openstack/charm_tests/gnocchi/__init__.py b/zaza/openstack/charm_tests/gnocchi/__init__.py new file mode 100644 index 0000000..3225ba0 --- /dev/null +++ b/zaza/openstack/charm_tests/gnocchi/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing gnocchi.""" diff --git a/zaza/openstack/charm_tests/gnocchi/setup.py b/zaza/openstack/charm_tests/gnocchi/setup.py new file mode 100644 index 0000000..4995f22 --- /dev/null +++ b/zaza/openstack/charm_tests/gnocchi/setup.py @@ -0,0 +1,69 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Setup for Gnocchi tests.""" + +import logging + +import zaza.model as model +import zaza.openstack.utilities.openstack as openstack_utils + + +def configure_s3_backend(): + """Inject S3 parameters from Swift for Gnocchi config.""" + session = openstack_utils.get_overcloud_keystone_session() + ks_client = openstack_utils.get_keystone_session_client(session) + + logging.info('Retrieving S3 connection data from Swift') + token_data = ks_client.tokens.get_token_data(session.get_token()) + project_id = token_data['token']['project']['id'] + user_id = token_data['token']['user']['id'] + + # Store URL to service providing S3 compatible API + for entry in token_data['token']['catalog']: + if entry['type'] == 's3': + for endpoint in entry['endpoints']: + if endpoint['interface'] == 'public': + s3_region = endpoint['region'] + s3_endpoint = endpoint['url'] + + # Create AWS compatible application credentials in Keystone + ec2_creds = ks_client.ec2.create(user_id, project_id) + + logging.info('Changing Gnocchi charm config to connect to S3') + model.set_application_config( + 'gnocchi', + {'s3-endpoint-url': s3_endpoint, + 's3-region-name': s3_region, + 's3-access-key-id': ec2_creds.access, + 's3-secret-access-key': ec2_creds.secret} + ) + logging.info('Waiting for units to execute config-changed hook') + model.wait_for_agent_status() + logging.info('Waiting for units to reach target states') + model.wait_for_application_states( + states={ + 'gnocchi': { + 'workload-status-': 'active', + 'workload-status-message': 'Unit is ready' + }, + 'ceilometer': { + 'workload-status': 'blocked', + 'workload-status-message': 'Run the ' + + 'ceilometer-upgrade action on the leader ' + + 'to initialize ceilometer and gnocchi' + } + } + ) + model.block_until_all_units_idle() diff --git a/zaza/openstack/charm_tests/gnocchi/tests.py b/zaza/openstack/charm_tests/gnocchi/tests.py new file mode 100644 index 0000000..ed4bac0 --- /dev/null +++ b/zaza/openstack/charm_tests/gnocchi/tests.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate Gnocchi testing.""" + +import base64 +import boto3 +import logging +import pprint +from gnocchiclient.v1 import client as gnocchi_client + +import zaza.model as model +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities as utilities +import zaza.openstack.utilities.openstack as openstack_utils + + +class GnocchiTest(test_utils.OpenStackBaseTest): + """Encapsulate Gnocchi tests.""" + + @property + def services(self): + """Return a list of services for the selected OpenStack release.""" + return ['haproxy', 'gnocchi-metricd', 'apache2'] + + def test_200_api_connection(self): + """Simple api calls to check service is up and responding.""" + logging.info('Instantiating gnocchi client...') + overcloud_auth = openstack_utils.get_overcloud_auth() + keystone = openstack_utils.get_keystone_client(overcloud_auth) + gnocchi_ep = keystone.service_catalog.url_for( + service_type='metric', + interface='publicURL' + ) + gnocchi = gnocchi_client.Client( + session=openstack_utils.get_overcloud_keystone_session(), + adapter_options={ + 'endpoint_override': gnocchi_ep, + } + ) + + logging.info('Checking api functionality...') + assert(gnocchi.status.get() != []) + + def test_910_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started. + """ + with self.pause_resume(self.services): + logging.info("Testing pause and resume") + + +class GnocchiS3Test(test_utils.OpenStackBaseTest): + """Test Gnocchi for S3 storage backend.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(GnocchiS3Test, cls).setUpClass() + + session = openstack_utils.get_overcloud_keystone_session() + ks_client = openstack_utils.get_keystone_session_client(session) + + # Get token data so we can clean our user_id and project_id + token_data = ks_client.tokens.get_token_data(session.get_token()) + project_id = token_data['token']['project']['id'] + user_id = token_data['token']['user']['id'] + + # Store URL to service providing S3 compatible API + for entry in token_data['token']['catalog']: + if entry['type'] == 's3': + for endpoint in entry['endpoints']: + if endpoint['interface'] == 'public': + cls.s3_region = endpoint['region'] + cls.s3_endpoint = endpoint['url'] + + # Create AWS compatible application credentials in Keystone + cls.ec2_creds = ks_client.ec2.create(user_id, project_id) + + def test_s3_list_gnocchi_buckets(self): + """Verify that the gnocchi buckets were created in the S3 backend.""" + kwargs = { + 'region_name': self.s3_region, + 'aws_access_key_id': self.ec2_creds.access, + 'aws_secret_access_key': self.ec2_creds.secret, + 'endpoint_url': self.s3_endpoint, + 'verify': self.cacert, + } + s3_client = boto3.client('s3', **kwargs) + + bucket_names = ['gnocchi-measure', 'gnocchi-aggregates'] + # Validate their presence + bucket_list = s3_client.list_buckets() + logging.info(pprint.pformat(bucket_list)) + for bkt in bucket_list['Buckets']: + for gnocchi_bkt in bucket_names: + if bkt['Name'] == gnocchi_bkt: + break + else: + AssertionError('Bucket "{}" not found'.format(gnocchi_bkt)) + + +class GnocchiExternalCATest(test_utils.OpenStackBaseTest): + """Test Gnocchi for external root CA config option.""" + + def test_upload_external_cert(self): + """Verify that the external CA is uploaded correctly.""" + logging.info('Changing value for trusted-external-ca-cert.') + ca_cert_option = 'trusted-external-ca-cert' + ppk, cert = utilities.cert.generate_cert('gnocchi_test.ci.local') + b64_cert = base64.b64encode(cert).decode() + config = { + ca_cert_option: b64_cert, + } + model.set_application_config( + 'gnocchi', + config + ) + model.block_until_all_units_idle() + + files = [ + '/usr/local/share/ca-certificates/gnocchi-external.crt', + '/etc/ssl/certs/gnocchi-external.pem', + ] + + for file in files: + logging.info("Validating that {} is created.".format(file)) + model.block_until_file_has_contents('gnocchi', file, 'CERTIFICATE') + logging.info("Found {} successfully.".format(file)) diff --git a/zaza/openstack/charm_tests/hacluster/__init__.py b/zaza/openstack/charm_tests/hacluster/__init__.py new file mode 100644 index 0000000..3825f7d --- /dev/null +++ b/zaza/openstack/charm_tests/hacluster/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing hacluster.""" diff --git a/zaza/openstack/charm_tests/hacluster/tests.py b/zaza/openstack/charm_tests/hacluster/tests.py new file mode 100644 index 0000000..9414145 --- /dev/null +++ b/zaza/openstack/charm_tests/hacluster/tests.py @@ -0,0 +1,196 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""HACluster testing.""" + +import logging +import os + +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.configure.hacluster +import zaza.utilities.juju as juju_utils + + +class HaclusterBaseTest(test_utils.OpenStackBaseTest): + """Base class for hacluster tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running hacluster tests.""" + super(HaclusterBaseTest, cls).setUpClass() + cls.vip = os.environ.get("TEST_VIP00") + + +class HaclusterTest(HaclusterBaseTest): + """hacluster tests.""" + + def test_900_action_cleanup(self): + """The services can be cleaned up.""" + zaza.model.run_action_on_leader( + self.application_name, + 'cleanup', + raise_on_failure=True) + + def test_910_pause_and_resume(self): + """The services can be paused and resumed.""" + with self.pause_resume([]): + logging.info("Testing pause resume") + + def _toggle_maintenance_and_wait(self, expected): + """Configure cluster maintenance-mode. + + :param expected: expected value to set maintenance-mode + """ + config = {"maintenance-mode": expected} + logging.info("Setting config to {}".format(config)) + zaza.model.set_application_config(self.application_name, config) + if expected == 'true': + _states = {"hacluster": { + "workload-status": "maintenance", + "workload-status-message": "Pacemaker in maintenance mode"}} + else: + _states = {"hacluster": { + "workload-status": "active", + "workload-status-message": "Unit is ready and clustered"}} + zaza.model.wait_for_application_states(states=_states) + logging.debug('OK') + + def test_920_put_in_maintenance(self): + """Put pacemaker in maintenance mode.""" + logging.debug('Setting cluster in maintenance mode') + + self._toggle_maintenance_and_wait('true') + self._toggle_maintenance_and_wait('false') + + +class HaclusterScaleBackAndForthTest(HaclusterBaseTest): + """hacluster tests scaling back and forth.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running hacluster tests.""" + super(HaclusterScaleBackAndForthTest, cls).setUpClass() + test_config = cls.test_config['tests_options']['hacluster'] + cls._principle_app_name = test_config['principle-app-name'] + cls._hacluster_charm_name = test_config['hacluster-charm-name'] + + def test_930_scaleback(self): + """Remove one unit, recalculate quorum and re-add one unit. + + NOTE(lourot): before lp:1400481 was fixed, the corosync ring wasn't + recalculated when removing units. So within a cluster of 3 units, + removing a unit and re-adding one led to a situation where corosync + considers having 3 nodes online out of 4, instead of just 3 out of 3. + This test covers this scenario. + """ + principle_units = sorted(zaza.model.get_status().applications[ + self._principle_app_name]['units'].keys()) + self.assertEqual(len(principle_units), 3) + surviving_principle_unit = principle_units[0] + doomed_principle_unit = principle_units[1] + surviving_hacluster_unit = juju_utils.get_subordinate_units( + [surviving_principle_unit], + charm_name=self._hacluster_charm_name)[0] + doomed_hacluster_unit = juju_utils.get_subordinate_units( + [doomed_principle_unit], + charm_name=self._hacluster_charm_name)[0] + + logging.info('Pausing unit {}'.format(doomed_hacluster_unit)) + zaza.model.run_action( + doomed_hacluster_unit, + 'pause', + raise_on_failure=True) + + logging.info('Removing {}'.format(doomed_principle_unit)) + zaza.model.destroy_unit( + self._principle_app_name, + doomed_principle_unit, + wait_disappear=True) + + logging.info('Waiting for model to settle') + zaza.model.block_until_unit_wl_status(surviving_hacluster_unit, + 'blocked') + # NOTE(lourot): the surviving principle units (usually keystone units) + # aren't guaranteed to be blocked, so we don't validate that here. + zaza.model.block_until_all_units_idle() + + # At this point the corosync ring hasn't been updated yet, so it should + # still remember the deleted unit: + self.__assert_some_corosync_nodes_are_offline(surviving_hacluster_unit) + + logging.info('Updating corosync ring') + hacluster_app_name = zaza.model.get_unit_from_name( + surviving_hacluster_unit).application + zaza.model.run_action_on_leader( + hacluster_app_name, + 'update-ring', + action_params={'i-really-mean-it': True}, + raise_on_failure=True) + + # At this point if the corosync ring has been properly updated, there + # shouldn't be any trace of the deleted unit anymore: + self.__assert_all_corosync_nodes_are_online(surviving_hacluster_unit) + + logging.info('Re-adding an hacluster unit') + zaza.model.add_unit(self._principle_app_name, wait_appear=True) + + logging.info('Waiting for model to settle') + # NOTE(lourot): the principle charm may remain blocked here. This seems + # to happen often when it is keystone and has a mysql-router as other + # subordinate charm. The keystone units seems to often remain blocked + # with 'Database not initialised'. This is not the hacluster charm's + # fault and this is why we don't validate here that the entire model + # goes back to active/idle. + zaza.model.block_until_unit_wl_status(surviving_hacluster_unit, + 'active') + zaza.model.block_until_all_units_idle() + + # Because of lp:1874719 the corosync ring may show a mysterious offline + # 'node1' node. We clean up the ring by re-running the 'update-ring' + # action: + logging.info('Updating corosync ring - workaround for lp:1874719') + zaza.model.run_action_on_leader( + hacluster_app_name, + 'update-ring', + action_params={'i-really-mean-it': True}, + raise_on_failure=True) + + # At this point the corosync ring should not contain any offline node: + self.__assert_all_corosync_nodes_are_online(surviving_hacluster_unit) + + def __assert_some_corosync_nodes_are_offline(self, hacluster_unit): + logging.info('Checking that corosync considers at least one node to ' + 'be offline') + output = self._get_crm_status(hacluster_unit) + self.assertIn('OFFLINE', output, + "corosync should list at least one offline node") + + def __assert_all_corosync_nodes_are_online(self, hacluster_unit): + logging.info('Checking that corosync considers all nodes to be online') + output = self._get_crm_status(hacluster_unit) + self.assertNotIn('OFFLINE', output, + "corosync shouldn't list any offline node") + + @staticmethod + def _get_crm_status(hacluster_unit): + cmd = 'sudo crm status' + result = zaza.model.run_on_unit(hacluster_unit, cmd) + code = result.get('Code') + if code != '0': + raise zaza.model.CommandRunFailed(cmd, result) + output = result.get('Stdout').strip() + logging.debug('crm output received: {}'.format(output)) + return output diff --git a/zaza/openstack/charm_tests/heat/__init__.py b/zaza/openstack/charm_tests/heat/__init__.py new file mode 100644 index 0000000..276455e --- /dev/null +++ b/zaza/openstack/charm_tests/heat/__init__.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing heat.""" diff --git a/zaza/openstack/charm_tests/heat/tests.py b/zaza/openstack/charm_tests/heat/tests.py new file mode 100644 index 0000000..24062b7 --- /dev/null +++ b/zaza/openstack/charm_tests/heat/tests.py @@ -0,0 +1,256 @@ +#!/usr/bin/env python3 +# +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate heat testing.""" +import logging +import json +import os +import subprocess +from urllib import parse as urlparse +from heatclient.common import template_utils + +import zaza.model +import zaza.openstack.charm_tests.nova.utils as nova_utils +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.openstack as openstack_utils +import zaza.charm_lifecycle.utils as charm_lifecycle_utils + +# Resource and name constants +IMAGE_NAME = 'cirros' +STACK_NAME = 'hello_world' +RESOURCE_TYPE = 'server' +TEMPLATES_PATH = 'files' +FLAVOR_NAME = 'm1.tiny' + + +class HeatBasicDeployment(test_utils.OpenStackBaseTest): + """Encapsulate Heat tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running Heat tests.""" + super(HeatBasicDeployment, cls).setUpClass() + cls.application = 'heat' + cls.keystone_session = openstack_utils.get_overcloud_keystone_session() + cls.heat_client = openstack_utils.get_heat_session_client( + cls.keystone_session) + cls.glance_client = openstack_utils.get_glance_session_client( + cls.keystone_session) + cls.nova_client = openstack_utils.get_nova_session_client( + cls.keystone_session) + + @property + def services(self): + """Return a list services for the selected OpenStack release. + + :returns: List of services + :rtype: [str] + """ + services = ['heat-api', 'heat-api-cfn', 'heat-engine'] + return services + + def test_100_domain_setup(self): + """Run required action for a working Heat unit.""" + # Action is REQUIRED to run for a functioning heat deployment + logging.info('Running domain-setup action on heat unit...') + unit = zaza.model.get_units(self.application_name)[0] + zaza.model.block_until_unit_wl_status(unit.entity_id, "active") + zaza.model.run_action(unit.entity_id, "domain-setup") + zaza.model.block_until_unit_wl_status(unit.entity_id, "active") + + def test_400_heat_resource_types_list(self): + """Check default resource list behavior and confirm functionality.""" + logging.info('Checking default heat resource list...') + types = self.heat_client.resource_types.list() + self.assertIsInstance(types, list, "Resource type is not a list!") + self.assertGreater(len(types), 0, "Resource type list len is zero") + + def test_410_heat_stack_create_delete(self): + """Create stack, confirm nova compute resource, delete stack.""" + # Verify new image name + images_list = list(self.glance_client.images.list()) + self.assertEqual(images_list[0].name, IMAGE_NAME, + "glance image create failed or unexpected") + + # Create a heat stack from a heat template, verify its status + logging.info('Creating heat stack...') + t_name = 'hot_hello_world.yaml' + if (openstack_utils.get_os_release() < + openstack_utils.get_os_release('xenial_queens')): + os_release = 'icehouse' + else: + os_release = 'queens' + + # Get location of template files in charm-heat + bundle_path = charm_lifecycle_utils.BUNDLE_DIR + if bundle_path[-1:] == "/": + bundle_path = bundle_path[0:-1] + + file_rel_path = os.path.join(os.path.dirname(bundle_path), + TEMPLATES_PATH, os_release, t_name) + file_abs_path = os.path.abspath(file_rel_path) + t_url = urlparse.urlparse(file_abs_path, scheme='file').geturl() + logging.info('template url: {}'.format(t_url)) + + r_req = self.heat_client.http_client + t_files, template = template_utils.get_template_contents(t_url, r_req) + env_files, env = template_utils.process_environment_and_files( + env_path=None) + + fields = { + 'stack_name': STACK_NAME, + 'timeout_mins': '15', + 'disable_rollback': False, + 'parameters': { + 'admin_pass': 'Ubuntu', + 'key_name': nova_utils.KEYPAIR_NAME, + 'image': IMAGE_NAME + }, + 'template': template, + 'files': dict(list(t_files.items()) + list(env_files.items())), + 'environment': env + } + + # Create the stack + try: + stack = self.heat_client.stacks.create(**fields) + logging.info('Stack data: {}'.format(stack)) + stack_id = stack['stack']['id'] + logging.info('Creating new stack, ID: {}'.format(stack_id)) + except Exception as e: + # Generally, an api or cloud config error if this is hit. + msg = 'Failed to create heat stack: {}'.format(e) + self.fail(msg) + + # Confirm stack reaches COMPLETE status. + # /!\ Heat stacks reach a COMPLETE status even when nova cannot + # find resources (a valid hypervisor) to fit the instance, in + # which case the heat stack self-deletes! Confirm anyway... + openstack_utils.resource_reaches_status(self.heat_client.stacks, + stack_id, + expected_status="COMPLETE", + msg="Stack status wait") + # List stack + stacks = list(self.heat_client.stacks.list()) + logging.info('All stacks: {}'.format(stacks)) + + # Get stack information + try: + stack = self.heat_client.stacks.get(STACK_NAME) + except Exception as e: + # Generally, a resource availability issue if this is hit. + msg = 'Failed to get heat stack: {}'.format(e) + self.fail(msg) + + # Confirm stack name. + logging.info('Expected, actual stack name: {}, ' + '{}'.format(STACK_NAME, stack.stack_name)) + self.assertEqual(stack.stack_name, STACK_NAME, + 'Stack name mismatch, ' + '{} != {}'.format(STACK_NAME, stack.stack_name)) + + # Confirm existence of a heat-generated nova compute resource + logging.info('Confirming heat stack resource status...') + resource = self.heat_client.resources.get(STACK_NAME, RESOURCE_TYPE) + server_id = resource.physical_resource_id + self.assertTrue(server_id, "Stack failed to spawn a compute resource.") + + # Confirm nova instance reaches ACTIVE status + openstack_utils.resource_reaches_status(self.nova_client.servers, + server_id, + expected_status="ACTIVE", + msg="nova instance") + logging.info('Nova instance reached ACTIVE status') + + # Delete stack + logging.info('Deleting heat stack...') + openstack_utils.delete_resource(self.heat_client.stacks, + STACK_NAME, msg="heat stack") + + def test_500_auth_encryption_key_same_on_units(self): + """Test the auth_encryption_key in heat.conf is same on all units.""" + logging.info("Checking the 'auth_encryption_key' is the same on " + "all units.") + output, ret = self._run_arbitrary( + "--application heat " + "--format json " + "grep auth_encryption_key /etc/heat/heat.conf") + if ret: + msg = "juju run error: ret: {}, output: {}".format(ret, output) + self.assertEqual(ret, 0, msg) + output = json.loads(output) + keys = {} + for r in output: + k = r['Stdout'].split('=')[1].strip() + keys[r['UnitId']] = k + # see if keys are different + ks = set(keys.values()) + self.assertEqual(len(ks), 1, "'auth_encryption_key' is not identical " + "on every unit: {}".format("{}={}".format(k, v) + for k, v in keys.items())) + + @staticmethod + def _run_arbitrary(command, timeout=300): + """Run an arbitrary command (as root), but not necessarily on a unit. + + (Otherwise the self.run(...) command could have been used for the unit + + :param command: The command to run. + :type command: str + :param timeout: Seconds to wait before timing out. + :type timeout: int + :raises: subprocess.CalledProcessError. + :returns: A pair containing the output of the command and exit value + :rtype: (str, int) + """ + cmd = ['juju', 'run', '--timeout', "{}s".format(timeout), + ] + command.split() + p = subprocess.Popen( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + stdout, stderr = p.communicate() + output = stdout if p.returncode == 0 else stderr + return output.decode('utf8').strip(), p.returncode + + def test_900_heat_restart_on_config_change(self): + """Verify the specified services are restarted when config changes.""" + logging.info('Testing restart on configuration change') + + # Expected default and alternate values + set_default = {'use-syslog': 'False'} + set_alternate = {'use-syslog': 'True'} + + # Config file affected by juju set config change + conf_file = '/etc/heat/heat.conf' + + # Make config change, check for service restarts + # In Amulet we waited 30 seconds...do we still need to? + logging.info('Making configuration change') + self.restart_on_changed( + conf_file, + set_default, + set_alternate, + None, + None, + self.services) + + def test_910_pause_and_resume(self): + """Run services pause and resume tests.""" + logging.info('Checking pause and resume actions...') + + with self.pause_resume(self.services): + logging.info("Testing pause resume") diff --git a/zaza/openstack/charm_tests/ironic/__init__.py b/zaza/openstack/charm_tests/ironic/__init__.py new file mode 100644 index 0000000..aedad05 --- /dev/null +++ b/zaza/openstack/charm_tests/ironic/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing ironic.""" diff --git a/zaza/openstack/charm_tests/ironic/setup.py b/zaza/openstack/charm_tests/ironic/setup.py new file mode 100644 index 0000000..fe7d034 --- /dev/null +++ b/zaza/openstack/charm_tests/ironic/setup.py @@ -0,0 +1,184 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Code for configuring ironic.""" + +import copy +import os +import tenacity + +import zaza.openstack.charm_tests.glance.setup as glance_setup +import zaza.openstack.utilities.openstack as openstack_utils +from zaza.openstack.utilities import ( + cli as cli_utils, +) +import zaza.model as zaza_model + + +FLAVORS = { + 'bm1.small': { + 'flavorid': 2, + 'ram': 2048, + 'disk': 20, + 'vcpus': 1, + 'properties': { + "resources:CUSTOM_BAREMETAL1_SMALL": 1, + }, + }, + 'bm1.medium': { + 'flavorid': 3, + 'ram': 4096, + 'disk': 40, + 'vcpus': 2, + 'properties': { + "resources:CUSTOM_BAREMETAL1_MEDIUM": 1, + }, + }, + 'bm1.large': { + 'flavorid': 4, + 'ram': 8192, + 'disk': 40, + 'vcpus': 4, + 'properties': { + "resources:CUSTOM_BAREMETAL1_LARGE": 1, + }, + }, + 'bm1.tempest': { + 'flavorid': 6, + 'ram': 256, + 'disk': 1, + 'vcpus': 1, + 'properties': { + "resources:CUSTOM_BAREMETAL1_TEMPEST": 1, + }, + }, + 'bm2.tempest': { + 'flavorid': 7, + 'ram': 512, + 'disk': 1, + 'vcpus': 1, + 'properties': { + "resources:CUSTOM_BAREMETAL2_TEMPEST": 1, + }, + }, +} + + +def _add_image(url, image_name, backend="swift", + disk_format="raw", container_format="bare"): + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(3), + reraise=True): + with attempt: + glance_setup.add_image( + url, + image_name=image_name, + backend=backend, + disk_format=disk_format, + container_format=container_format) + + +def add_ironic_deployment_image(initrd_url=None, kernel_url=None): + """Add Ironic deploy images to glance. + + :param initrd_url: URL where the ari image resides + :type initrd_url: str + :param kernel_url: URL where the aki image resides + :type kernel_url: str + """ + base_name = 'ironic-deploy' + initrd_name = "{}-initrd".format(base_name) + vmlinuz_name = "{}-vmlinuz".format(base_name) + if not initrd_url: + initrd_url = os.environ.get('TEST_IRONIC_DEPLOY_INITRD', None) + if not kernel_url: + kernel_url = os.environ.get('TEST_IRONIC_DEPLOY_VMLINUZ', None) + if not all([initrd_url, kernel_url]): + raise ValueError("Missing required deployment image URLs") + + _add_image( + initrd_url, + initrd_name, + backend="swift", + disk_format="ari", + container_format="ari") + + _add_image( + kernel_url, + vmlinuz_name, + backend="swift", + disk_format="aki", + container_format="aki") + + +def add_ironic_os_image(image_url=None): + """Upload the operating system images built for bare metal deployments. + + :param image_url: URL where the image resides + :type image_url: str + """ + image_url = image_url or os.environ.get( + 'TEST_IRONIC_RAW_BM_IMAGE', None) + image_name = "baremetal-ubuntu-image" + if image_url is None: + raise ValueError("Missing image_url") + + _add_image( + image_url, + image_name, + backend="swift", + disk_format="raw", + container_format="bare") + + +def set_temp_url_secret(): + """Run the set-temp-url-secret on the ironic-conductor leader. + + This is needed if direct boot method is enabled. + """ + zaza_model.run_action_on_leader( + 'ironic-conductor', + 'set-temp-url-secret', + action_params={}) + + +def create_bm_flavors(nova_client=None): + """Create baremetal flavors. + + :param nova_client: Authenticated nova client + :type nova_client: novaclient.v2.client.Client + """ + if not nova_client: + keystone_session = openstack_utils.get_overcloud_keystone_session() + nova_client = openstack_utils.get_nova_session_client( + keystone_session) + cli_utils.setup_logging() + names = [flavor.name for flavor in nova_client.flavors.list()] + # Disable scheduling based on standard flavor properties + default_properties = { + "resources:VCPU": 0, + "resources:MEMORY_MB": 0, + "resources:DISK_GB": 0, + } + for flavor in FLAVORS.keys(): + if flavor not in names: + properties = copy.deepcopy(default_properties) + properties.update(FLAVORS[flavor]["properties"]) + bm_flavor = nova_client.flavors.create( + name=flavor, + ram=FLAVORS[flavor]['ram'], + vcpus=FLAVORS[flavor]['vcpus'], + disk=FLAVORS[flavor]['disk'], + flavorid=FLAVORS[flavor]['flavorid']) + bm_flavor.set_keys(properties) diff --git a/zaza/openstack/charm_tests/ironic/tests.py b/zaza/openstack/charm_tests/ironic/tests.py new file mode 100644 index 0000000..9cfb85a --- /dev/null +++ b/zaza/openstack/charm_tests/ironic/tests.py @@ -0,0 +1,83 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate ironic testing.""" + +import logging + +import ironicclient.client as ironic_client +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.openstack as openstack_utils + + +def _get_ironic_client(ironic_api_version="1.58"): + keystone_session = openstack_utils.get_overcloud_keystone_session() + ironic = ironic_client.Client(1, session=keystone_session, + os_ironic_api_version=ironic_api_version) + return ironic + + +class IronicTest(test_utils.OpenStackBaseTest): + """Run Ironic specific tests.""" + + _SERVICES = ['ironic-api'] + + def test_110_catalog_endpoints(self): + """Verify that the endpoints are present in the catalog.""" + overcloud_auth = openstack_utils.get_overcloud_auth() + keystone_client = openstack_utils.get_keystone_client( + overcloud_auth) + actual_endpoints = keystone_client.service_catalog.get_endpoints() + actual_interfaces = [endpoint['interface'] for endpoint in + actual_endpoints["baremetal"]] + for expected_interface in ('internal', 'admin', 'public'): + assert(expected_interface in actual_interfaces) + + def test_400_api_connection(self): + """Simple api calls to check service is up and responding.""" + ironic = _get_ironic_client() + + logging.info('listing conductors') + conductors = ironic.conductor.list() + assert(len(conductors) > 0) + + # By default, only IPMI HW type is enabled. iDrac and Redfish + # can optionally be enabled + drivers = ironic.driver.list() + driver_names = [drv.name for drv in drivers] + + expected = ['intel-ipmi', 'ipmi'] + for exp in expected: + assert(exp in driver_names) + assert(len(driver_names) == 2) + + def test_900_restart_on_config_change(self): + """Checking restart happens on config change. + + Change debug mode and assert that change propagates to the correct + file and that services are restarted as a result + """ + self.restart_on_changed_debug_oslo_config_file( + '/etc/ironic/ironic.conf', self._SERVICES) + + def test_910_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + """ + logging.info('Skipping pause resume test LP: #1886202...') + return + with self.pause_resume(self._SERVICES): + logging.info("Testing pause resume") diff --git a/zaza/openstack/charm_tests/kerberos/__init__.py b/zaza/openstack/charm_tests/kerberos/__init__.py new file mode 100644 index 0000000..e257ccd --- /dev/null +++ b/zaza/openstack/charm_tests/kerberos/__init__.py @@ -0,0 +1,21 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing keystone-kerberos.""" + + +class KerberosConfigurationError(Exception): + """Custom exception for Kerberos test server.""" + + pass diff --git a/zaza/openstack/charm_tests/kerberos/setup.py b/zaza/openstack/charm_tests/kerberos/setup.py new file mode 100644 index 0000000..11f4afa --- /dev/null +++ b/zaza/openstack/charm_tests/kerberos/setup.py @@ -0,0 +1,239 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Setup for keystone-kerberos tests.""" + +import logging +import tempfile +import zaza.model +from zaza.openstack.utilities import openstack as openstack_utils +from zaza.openstack.charm_tests.kerberos import KerberosConfigurationError + + +def get_unit_full_hostname(unit_name): + """Retrieve the full hostname of a unit.""" + for unit in zaza.model.get_units(unit_name): + result = zaza.model.run_on_unit(unit.entity_id, 'hostname -f') + hostname = result['Stdout'].rstrip() + return hostname + + +def add_empty_resource_file_to_keystone_kerberos(): + """Add an empty resource to keystone kerberos to complete the setup.""" + logging.info('Attaching an empty keystone keytab to the keystone-kerberos' + ' unit') + tmp_file = '/tmp/empty.keytab' + with open(tmp_file, 'w'): + pass + + zaza.model.attach_resource('keystone-kerberos', + 'keystone_keytab', + tmp_file) + logging.info('Waiting for keystone-kerberos unit to be active and idle') + unit_name = zaza.model.get_units('keystone-kerberos')[0].name + zaza.model.block_until_unit_wl_status(unit_name, "active") + zaza.model.block_until_all_units_idle() + + +def add_dns_entry(kerberos_hostname="kerberos.testubuntu.com"): + """Add a dns entry in /etc/hosts for the kerberos test server. + + :param kerberos_hostname: FQDN of Kerberos server + :type kerberos_hostname: string + """ + logging.info('Retrieving kerberos IP and hostname') + kerberos_ip = zaza.model.get_app_ips("kerberos-server")[0] + cmd = "sudo sed -i '/localhost/i\\{}\t{}' /etc/hosts"\ + .format(kerberos_ip, kerberos_hostname) + + app_names = ['keystone', 'ubuntu-test-host'] + for app_name in app_names: + logging.info('Adding dns entry to the {} unit'.format(app_name)) + zaza_unit = zaza.model.get_units(app_name)[0] + zaza.model.run_on_unit(zaza_unit.entity_id, cmd) + + +def configure_keystone_service_in_kerberos(): + """Configure the keystone service in Kerberos. + + A principal needs to be added to the kerberos server to get a keytab for + this service. The keytab is used for the authentication of the keystone + service. + """ + logging.info('Configure keystone service in Kerberos') + unit = zaza.model.get_units('kerberos-server')[0] + keystone_hostname = get_unit_full_hostname('keystone') + commands = ['sudo kadmin.local -q "addprinc -randkey -clearpolicy ' + 'HTTP/{}"'.format(keystone_hostname), + 'sudo kadmin.local -q "ktadd ' + '-k /home/ubuntu/keystone.keytab ' + 'HTTP/{}"'.format(keystone_hostname), + 'sudo chmod 777 /home/ubuntu/keystone.keytab'] + + try: + for command in commands: + logging.info( + 'Sending command to the kerberos-server: {}'.format(command)) + result = zaza.model.run_on_unit(unit.name, command) + if result['Stderr']: + raise KerberosConfigurationError + elif result['Stdout']: + logging.info('Stdout: {}'.format(result['Stdout'])) + except KerberosConfigurationError: + logging.error('An error occured : {}'.format(result['Stderr'])) + + +def retrieve_and_attach_keytab(): + """Retrieve and attach the keytab to the keystone-kerberos unit.""" + kerberos_server = zaza.model.get_units('kerberos-server')[0] + + dump_file = "keystone.keytab" + remote_file = "/home/ubuntu/keystone.keytab" + with tempfile.TemporaryDirectory() as tmpdirname: + tmp_file = "{}/{}".format(tmpdirname, dump_file) + logging.info('Retrieving keystone.keytab from the kerberos server.') + zaza.model.scp_from_unit( + kerberos_server.name, + remote_file, + tmp_file) + logging.info('Attaching the keystone_keytab resource to ' + 'keystone-kerberos') + zaza.model.attach_resource('keystone-kerberos', + 'keystone_keytab', + tmp_file) + + # cs:ubuntu charm has changed behaviour and we can't rely on the workload + # staus message. Thus, ignore it. + states = { + "ubuntu-test-host": { + "workload-status": "active", + "workload-status-message": "", + } + } + zaza.model.wait_for_application_states(states=states) + zaza.model.block_until_all_units_idle() + + +def openstack_setup_kerberos(): + """Create a test domain, project, and user for kerberos tests.""" + kerberos_domain = 'k8s' + kerberos_project = 'k8s' + kerberos_user = 'admin' + kerberos_password = 'password123' + role = 'admin' + + logging.info('Retrieving a keystone session and client.') + keystone_session = openstack_utils.get_overcloud_keystone_session() + keystone_client = openstack_utils.get_keystone_session_client( + keystone_session) + logging.info('Creating domain, project and user for Kerberos tests.') + domain = keystone_client.domains.create(kerberos_domain, + description='Kerberos Domain', + enabled=True) + project = keystone_client.projects.create(kerberos_project, + domain, + description='Test project', + enabled=True) + demo_user = keystone_client.users.create(kerberos_user, + domain=domain, + project=project, + password=kerberos_password, + email='demo@demo.com', + description='Demo User', + enabled=True) + admin_role = keystone_client.roles.find(name=role) + keystone_client.roles.grant( + admin_role, + user=demo_user, + project_domain=domain, + project=project + ) + keystone_client.roles.grant( + admin_role, + user=demo_user, + domain=domain + ) + + +def setup_kerberos_configuration_for_test_host(): + """Retrieve the keytab and krb5.conf to setup the ubuntu test host.""" + kerberos_server = zaza.model.get_units('kerberos-server')[0] + ubuntu_test_host = zaza.model.get_units('ubuntu-test-host')[0] + + dump_file = "krb5.keytab" + remote_file = "/etc/krb5.keytab" + host_keytab_path = '/home/ubuntu/krb5.keytab' + with tempfile.TemporaryDirectory() as tmpdirname: + tmp_file = "{}/{}".format(tmpdirname, dump_file) + logging.info("Retrieving {} from {}.".format(remote_file, + kerberos_server.name)) + zaza.model.scp_from_unit( + kerberos_server.name, + remote_file, + tmp_file) + + logging.info("SCP {} to {} on {}.".format(tmp_file, + host_keytab_path, + ubuntu_test_host.name)) + zaza.model.scp_to_unit( + ubuntu_test_host.name, + tmp_file, + host_keytab_path) + + dump_file = "krb5.conf" + remote_file = "/etc/krb5.conf" + temp_krb5_path = "/home/ubuntu/krb5.conf" + with tempfile.TemporaryDirectory() as tmpdirname: + tmp_file = "{}/{}".format(tmpdirname, dump_file) + logging.info("Retrieving {} from {}".format(remote_file, + kerberos_server.name)) + zaza.model.scp_from_unit( + kerberos_server.name, + remote_file, + tmp_file) + + logging.info("SCP {} to {} on {}.".format(tmp_file, + temp_krb5_path, + ubuntu_test_host)) + zaza.model.scp_to_unit( + ubuntu_test_host.name, + tmp_file, + temp_krb5_path) + logging.info('Moving {} to {} on {}.'.format(temp_krb5_path, + remote_file, ubuntu_test_host.name)) + zaza.model.run_on_unit(ubuntu_test_host.name, ('sudo mv {} {}'. + format(temp_krb5_path, remote_file))) + + +def install_apt_packages_on_ubuntu_test_host(): + """Install apt packages on a zaza unit.""" + ubuntu_test_host = zaza.model.get_units('ubuntu-test-host')[0] + packages = ['krb5-user', 'python3-openstackclient', + 'python3-requests-kerberos'] + for package in packages: + logging.info('Installing {}'.format(package)) + result = zaza.model.run_on_unit(ubuntu_test_host.name, + "apt install {} -y".format(package)) + assert result['Code'] == '0', result['Stderr'] + + +def run_all_configuration_steps(): + """Execute all the necessary functions for the tests setup.""" + add_empty_resource_file_to_keystone_kerberos() + add_dns_entry() + configure_keystone_service_in_kerberos() + retrieve_and_attach_keytab() + openstack_setup_kerberos() + setup_kerberos_configuration_for_test_host() + install_apt_packages_on_ubuntu_test_host() diff --git a/zaza/openstack/charm_tests/kerberos/tests.py b/zaza/openstack/charm_tests/kerberos/tests.py new file mode 100644 index 0000000..0f627b7 --- /dev/null +++ b/zaza/openstack/charm_tests/kerberos/tests.py @@ -0,0 +1,74 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Keystone Kerberos Tests.""" + +import logging + +import zaza.model +from zaza.openstack.charm_tests.kerberos.setup import get_unit_full_hostname +from zaza.openstack.charm_tests.keystone import BaseKeystoneTest +from zaza.openstack.utilities import openstack as openstack_utils + + +class CharmKeystoneKerberosTest(BaseKeystoneTest): + """Charm Keystone Kerberos Test.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running Keystone Kerberos charm tests.""" + super(CharmKeystoneKerberosTest, cls).setUpClass() + + def test_keystone_kerberos_authentication(self): + """Validate auth to OpenStack through the kerberos method.""" + logging.info('Retrieving a kerberos token with kinit for admin user') + + ubuntu_test_host = zaza.model.get_units('ubuntu-test-host')[0] + result = zaza.model.run_on_unit(ubuntu_test_host.name, + "echo password123 | kinit admin") + assert result['Code'] == '0', result['Stderr'] + + logging.info('Changing token mod for user access') + result = zaza.model.run_on_unit( + ubuntu_test_host.name, + "sudo install -m 777 /tmp/krb5cc_0 /tmp/krb5cc_1000" + ) + assert result['Code'] == '0', result['Stderr'] + + logging.info('Fetching user/project info in OpenStack') + domain_name = 'k8s' + project_name = 'k8s' + keystone_session = openstack_utils.get_overcloud_keystone_session() + keystone_client = openstack_utils.get_keystone_session_client( + keystone_session) + domain_id = keystone_client.domains.find(name=domain_name).id + project_id = keystone_client.projects.find(name=project_name).id + keystone_hostname = get_unit_full_hostname('keystone') + + logging.info('Retrieving an OpenStack token to validate auth') + cmd = 'openstack token issue -f value -c id ' \ + '--os-auth-url http://{}:5000/krb/v3 ' \ + '--os-project-id {} ' \ + '--os-project-name {} ' \ + '--os-project-domain-id {} ' \ + '--os-region-name RegionOne ' \ + '--os-interface public ' \ + '--os-identity-api-version 3 ' \ + '--os-auth-type v3kerberos'.format(keystone_hostname, + project_id, + project_name, + domain_id) + + result = zaza.model.run_on_unit(ubuntu_test_host.name, cmd) + assert result['Code'] == '0', result['Stderr'] diff --git a/zaza/openstack/charm_tests/keystone/__init__.py b/zaza/openstack/charm_tests/keystone/__init__.py index 2f6dd1f..4d2e829 100644 --- a/zaza/openstack/charm_tests/keystone/__init__.py +++ b/zaza/openstack/charm_tests/keystone/__init__.py @@ -13,6 +13,7 @@ # limitations under the License. """Collection of code for setting up and testing keystone.""" +import contextlib import zaza import zaza.openstack.charm_tests.test_utils as test_utils import zaza.openstack.utilities.openstack as openstack_utils @@ -25,14 +26,19 @@ DEMO_ADMIN_USER_PASSWORD = 'password' DEMO_USER = 'demo' DEMO_PASSWORD = 'password' +TEMPEST_ROLES = ['member', 'ResellerAdmin'] + class BaseKeystoneTest(test_utils.OpenStackBaseTest): """Base for Keystone charm tests.""" @classmethod - def setUpClass(cls): + def setUpClass(cls, application_name=None): """Run class setup for running Keystone charm operation tests.""" super(BaseKeystoneTest, cls).setUpClass(application_name='keystone') + # Standardize v2 and v3 as ints + cls.api_v2 = 2 + cls.api_v3 = 3 # Check if we are related to Vault TLS certificates cls.tls_rid = zaza.model.get_relation_id( 'keystone', 'vault', remote_interface_name='certificates') @@ -50,12 +56,21 @@ class BaseKeystoneTest(test_utils.OpenStackBaseTest): cls.keystone_ips.append(cls.vip) if (openstack_utils.get_os_release() < openstack_utils.get_os_release('xenial_queens')): - cls.default_api_version = '2' + cls.default_api_version = cls.api_v2 else: - cls.default_api_version = '3' + cls.default_api_version = cls.api_v3 cls.admin_keystone_session = ( openstack_utils.get_overcloud_keystone_session()) cls.admin_keystone_client = ( openstack_utils.get_keystone_session_client( cls.admin_keystone_session, client_api_version=cls.default_api_version)) + + @contextlib.contextmanager + def v3_keystone_preferred(self): + """Set the preferred keystone api to v3 within called context.""" + with self.config_change( + {'preferred-api-version': self.default_api_version}, + {'preferred-api-version': self.api_v3}, + application_name="keystone"): + yield diff --git a/zaza/openstack/charm_tests/keystone/setup.py b/zaza/openstack/charm_tests/keystone/setup.py index c2cc4ac..73264cd 100644 --- a/zaza/openstack/charm_tests/keystone/setup.py +++ b/zaza/openstack/charm_tests/keystone/setup.py @@ -14,6 +14,12 @@ """Code for setting up keystone.""" +import logging + +import keystoneauth1 + +import zaza.charm_lifecycle.utils as lifecycle_utils +import zaza.model import zaza.openstack.utilities.openstack as openstack_utils from zaza.openstack.charm_tests.keystone import ( BaseKeystoneTest, @@ -24,9 +30,28 @@ from zaza.openstack.charm_tests.keystone import ( DEMO_ADMIN_USER_PASSWORD, DEMO_USER, DEMO_PASSWORD, + TEMPEST_ROLES, ) +def wait_for_cacert(model_name=None): + """Wait for keystone to install a cacert. + + :param model_name: Name of model to query. + :type model_name: str + """ + logging.info("Waiting for cacert") + zaza.openstack.utilities.openstack.block_until_ca_exists( + 'keystone', + 'CERTIFICATE', + model_name=model_name) + zaza.model.block_until_all_units_idle(model_name=model_name) + test_config = lifecycle_utils.get_charm_config(fatal=False) + zaza.model.wait_for_application_states( + states=test_config.get('target_deploy_status', {}), + model_name=model_name) + + def add_demo_user(): """Add a demo user to the current deployment.""" def _v2(): @@ -110,8 +135,35 @@ def add_demo_user(): # under test other than keystone. with _singleton.config_change( {'preferred-api-version': _singleton.default_api_version}, - {'preferred-api-version': '3'}, application_name="keystone"): + {'preferred-api-version': 3}, application_name="keystone"): _v3() else: # create only V3 user _v3() + + +def _add_additional_roles(roles): + """Add additional roles to this deployment. + + :param ctxt: roles + :type ctxt: list + :returns: None + :rtype: None + """ + keystone_session = openstack_utils.get_overcloud_keystone_session() + keystone_client = openstack_utils.get_keystone_session_client( + keystone_session) + for role_name in roles: + try: + keystone_client.roles.create(role_name) + except keystoneauth1.exceptions.http.Conflict: + pass + + +def add_tempest_roles(): + """Add tempest roles to this deployment. + + :returns: None + :rtype: None + """ + _add_additional_roles(TEMPEST_ROLES) diff --git a/zaza/openstack/charm_tests/keystone/tests.py b/zaza/openstack/charm_tests/keystone/tests.py index 66727cc..477152d 100644 --- a/zaza/openstack/charm_tests/keystone/tests.py +++ b/zaza/openstack/charm_tests/keystone/tests.py @@ -17,14 +17,13 @@ import collections import json import logging import pprint - import keystoneauth1 import zaza.model import zaza.openstack.utilities.exceptions as zaza_exceptions -import zaza.openstack.utilities.juju as juju_utils +import zaza.utilities.juju as juju_utils import zaza.openstack.utilities.openstack as openstack_utils - +import zaza.charm_lifecycle.utils as lifecycle_utils import zaza.openstack.charm_tests.test_utils as test_utils from zaza.openstack.charm_tests.keystone import ( BaseKeystoneTest, @@ -189,10 +188,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest): openstack_utils.get_os_release('trusty_mitaka')): logging.info('skipping test < trusty_mitaka') return - with self.config_change( - {'preferred-api-version': self.default_api_version}, - {'preferred-api-version': '3'}, - application_name="keystone"): + with self.v3_keystone_preferred(): for ip in self.keystone_ips: try: logging.info('keystone IP {}'.format(ip)) @@ -212,7 +208,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest): def test_end_user_domain_admin_access(self): """Verify that end-user domain admin does not have elevated privileges. - In additon to validating that the `policy.json` is written and the + In addition to validating that the `policy.json` is written and the service is restarted on config-changed, the test validates that our `policy.json` is correct. @@ -222,10 +218,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest): openstack_utils.get_os_release('xenial_ocata')): logging.info('skipping test < xenial_ocata') return - with self.config_change( - {'preferred-api-version': self.default_api_version}, - {'preferred-api-version': '3'}, - application_name="keystone"): + with self.v3_keystone_preferred(): for ip in self.keystone_ips: openrc = { 'API_VERSION': 3, @@ -236,7 +229,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest): 'OS_DOMAIN_NAME': DEMO_DOMAIN, } if self.tls_rid: - openrc['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT + openrc['OS_CACERT'] = openstack_utils.get_cacert() openrc['OS_AUTH_URL'] = ( openrc['OS_AUTH_URL'].replace('http', 'https')) logging.info('keystone IP {}'.format(ip)) @@ -257,7 +250,7 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest): 'allowed when it should not be.') logging.info('OK') - def test_end_user_acccess_and_token(self): + def test_end_user_access_and_token(self): """Verify regular end-user access resources and validate token data. In effect this also validates user creation, presence of standard @@ -266,9 +259,10 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest): """ def _validate_token_data(openrc): if self.tls_rid: - openrc['OS_CACERT'] = openstack_utils.KEYSTONE_LOCAL_CACERT + openrc['OS_CACERT'] = openstack_utils.get_cacert() openrc['OS_AUTH_URL'] = ( openrc['OS_AUTH_URL'].replace('http', 'https')) + logging.info('keystone IP {}'.format(ip)) keystone_session = openstack_utils.get_keystone_session( openrc) keystone_client = openstack_utils.get_keystone_session_client( @@ -326,15 +320,27 @@ class AuthenticationAuthorizationTest(BaseKeystoneTest): 'OS_PROJECT_DOMAIN_NAME': DEMO_DOMAIN, 'OS_PROJECT_NAME': DEMO_PROJECT, } - with self.config_change( - {'preferred-api-version': self.default_api_version}, - {'preferred-api-version': '3'}, - application_name="keystone"): + with self.v3_keystone_preferred(): for ip in self.keystone_ips: openrc.update( {'OS_AUTH_URL': 'http://{}:5000/v3'.format(ip)}) _validate_token_data(openrc) + def test_backward_compatible_uuid_for_default_domain(self): + """Check domain named ``default`` literally has ``default`` as ID. + + Some third party software chooses to hard code this value for some + inexplicable reason. + """ + with self.v3_keystone_preferred(): + ks_session = openstack_utils.get_keystone_session( + openstack_utils.get_overcloud_auth()) + ks_client = openstack_utils.get_keystone_session_client( + ks_session) + domain = ks_client.domains.get('default') + logging.info(pprint.pformat(domain)) + assert domain.id == 'default' + class SecurityTests(BaseKeystoneTest): """Keystone security tests tests.""" @@ -350,13 +356,13 @@ class SecurityTests(BaseKeystoneTest): # this initial work to get validation in. There will be bugs targeted # to each one and resolved independently where possible. expected_failures = [ - 'disable-admin-token', ] expected_passes = [ 'check-max-request-body-size', - 'uses-sha256-for-hashing-tokens', - 'uses-fernet-token-after-default', + 'disable-admin-token', 'insecure-debug-is-false', + 'uses-fernet-token-after-default', + 'uses-sha256-for-hashing-tokens', 'validate-file-ownership', 'validate-file-permissions', ] @@ -370,4 +376,307 @@ class SecurityTests(BaseKeystoneTest): action_params={}), expected_passes, expected_failures, - expected_to_pass=False) + expected_to_pass=True) + + +class LdapTests(BaseKeystoneTest): + """Keystone ldap tests.""" + + non_string_type_keys = ('ldap-user-enabled-mask', + 'ldap-user-enabled-invert', + 'ldap-group-members-are-ids', + 'ldap-use-pool') + + @classmethod + def setUpClass(cls): + """Run class setup for running Keystone ldap-tests.""" + super(LdapTests, cls).setUpClass() + + def _get_ldap_config(self): + """Generate ldap config for current model. + + :return: tuple of whether ldap-server is running and if so, config + for the keystone-ldap application. + :rtype: Tuple[bool, Dict[str,str]] + """ + ldap_ips = zaza.model.get_app_ips("ldap-server") + self.assertTrue(ldap_ips, "Should be at least one ldap server") + return { + 'ldap-server': "ldap://{}".format(ldap_ips[0]), + 'ldap-user': 'cn=admin,dc=test,dc=com', + 'ldap-password': 'crapper', + 'ldap-suffix': 'dc=test,dc=com', + 'domain-name': 'userdomain', + 'ldap-config-flags': + { + 'group_tree_dn': 'ou=groups,dc=test,dc=com', + 'group_objectclass': 'posixGroup', + 'group_name_attribute': 'cn', + 'group_member_attribute': 'memberUid', + 'group_members_are_ids': 'true', + } + } + + def _find_keystone_v3_user(self, username, domain, group=None): + """Find a user within a specified keystone v3 domain. + + :param str username: Username to search for in keystone + :param str domain: username selected from which domain + :param str group: group to search for in keystone for group membership + :return: return username if found + :rtype: Optional[str] + """ + for ip in self.keystone_ips: + logging.info('Keystone IP {}'.format(ip)) + session = openstack_utils.get_keystone_session( + openstack_utils.get_overcloud_auth(address=ip)) + client = openstack_utils.get_keystone_session_client(session) + + if group is None: + domain_users = client.users.list( + domain=client.domains.find(name=domain).id, + ) + else: + domain_users = client.users.list( + domain=client.domains.find(name=domain).id, + group=self._find_keystone_v3_group(group, domain).id, + ) + + usernames = [u.name.lower() for u in domain_users] + if username.lower() in usernames: + return username + + logging.debug( + "User {} was not found. Returning None.".format(username) + ) + return None + + def _find_keystone_v3_group(self, group, domain): + """Find a group within a specified keystone v3 domain. + + :param str group: Group to search for in keystone + :param str domain: group selected from which domain + :return: return group if found + :rtype: Optional[str] + """ + for ip in self.keystone_ips: + logging.info('Keystone IP {}'.format(ip)) + session = openstack_utils.get_keystone_session( + openstack_utils.get_overcloud_auth(address=ip)) + client = openstack_utils.get_keystone_session_client(session) + + domain_groups = client.groups.list( + domain=client.domains.find(name=domain).id + ) + + for searched_group in domain_groups: + if searched_group.name.lower() == group.lower(): + return searched_group + + logging.debug( + "Group {} was not found. Returning None.".format(group) + ) + return None + + def test_100_keystone_ldap_users(self): + """Validate basic functionality of keystone API with ldap.""" + application_name = 'keystone-ldap' + intended_cfg = self._get_ldap_config() + current_cfg, non_string_cfg = ( + self.config_current_separate_non_string_type_keys( + self.non_string_type_keys, intended_cfg, application_name) + ) + + with self.config_change( + {}, + non_string_cfg, + application_name=application_name, + reset_to_charm_default=True): + with self.config_change( + current_cfg, + intended_cfg, + application_name=application_name): + logging.info( + 'Waiting for users to become available in keystone...' + ) + test_config = lifecycle_utils.get_charm_config(fatal=False) + zaza.model.wait_for_application_states( + states=test_config.get("target_deploy_status", {}) + ) + + with self.v3_keystone_preferred(): + # NOTE(jamespage): Test fixture should have + # johndoe and janedoe accounts + johndoe = self._find_keystone_v3_user( + 'john doe', 'userdomain') + self.assertIsNotNone( + johndoe, "user 'john doe' was unknown") + janedoe = self._find_keystone_v3_user( + 'jane doe', 'userdomain') + self.assertIsNotNone( + janedoe, "user 'jane doe' was unknown") + + def test_101_keystone_ldap_groups(self): + """Validate basic functionality of keystone API with ldap.""" + application_name = 'keystone-ldap' + intended_cfg = self._get_ldap_config() + current_cfg, non_string_cfg = ( + self.config_current_separate_non_string_type_keys( + self.non_string_type_keys, intended_cfg, application_name) + ) + + with self.config_change( + {}, + non_string_cfg, + application_name=application_name, + reset_to_charm_default=True): + with self.config_change( + current_cfg, + intended_cfg, + application_name=application_name): + logging.info( + 'Waiting for groups to become available in keystone...' + ) + test_config = lifecycle_utils.get_charm_config(fatal=False) + zaza.model.wait_for_application_states( + states=test_config.get("target_deploy_status", {}) + ) + + with self.v3_keystone_preferred(): + # NOTE(arif-ali): Test fixture should have openstack and + # admin groups + openstack_group = self._find_keystone_v3_group( + 'openstack', 'userdomain') + self.assertIsNotNone( + openstack_group.name, "group 'openstack' was unknown") + admin_group = self._find_keystone_v3_group( + 'admin', 'userdomain') + self.assertIsNotNone( + admin_group.name, "group 'admin' was unknown") + + def test_102_keystone_ldap_group_membership(self): + """Validate basic functionality of keystone API with ldap.""" + application_name = 'keystone-ldap' + intended_cfg = self._get_ldap_config() + current_cfg, non_string_cfg = ( + self.config_current_separate_non_string_type_keys( + self.non_string_type_keys, intended_cfg, application_name) + ) + + with self.config_change( + {}, + non_string_cfg, + application_name=application_name, + reset_to_charm_default=True): + with self.config_change( + current_cfg, + intended_cfg, + application_name=application_name): + logging.info( + 'Waiting for groups to become available in keystone...' + ) + test_config = lifecycle_utils.get_charm_config(fatal=False) + zaza.model.wait_for_application_states( + states=test_config.get("target_deploy_status", {}) + ) + + with self.v3_keystone_preferred(): + # NOTE(arif-ali): Test fixture should have openstack and + # admin groups + openstack_group = self._find_keystone_v3_user( + 'john doe', 'userdomain', group='openstack') + self.assertIsNotNone( + openstack_group, + "john doe was not in group 'openstack'") + admin_group = self._find_keystone_v3_user( + 'john doe', 'userdomain', group='admin') + self.assertIsNotNone( + admin_group, "'john doe' was not in group 'admin'") + + +class LdapExplicitCharmConfigTests(LdapTests): + """Keystone ldap tests.""" + + def _get_ldap_config(self): + """Generate ldap config for current model. + + :return: tuple of whether ldap-server is running and if so, config + for the keystone-ldap application. + :rtype: Tuple[bool, Dict[str,str]] + """ + ldap_ips = zaza.model.get_app_ips("ldap-server") + self.assertTrue(ldap_ips, "Should be at least one ldap server") + return { + 'ldap-server': "ldap://{}".format(ldap_ips[0]), + 'ldap-user': 'cn=admin,dc=test,dc=com', + 'ldap-password': 'crapper', + 'ldap-suffix': 'dc=test,dc=com', + 'domain-name': 'userdomain', + 'ldap-query-scope': 'one', + 'ldap-user-objectclass': 'inetOrgPerson', + 'ldap-user-id-attribute': 'cn', + 'ldap-user-name-attribute': 'sn', + 'ldap-user-enabled-attribute': 'enabled', + 'ldap-user-enabled-invert': False, + 'ldap-user-enabled-mask': 0, + 'ldap-user-enabled-default': 'True', + 'ldap-group-tree-dn': 'ou=groups,dc=test,dc=com', + 'ldap-group-objectclass': '', + 'ldap-group-id-attribute': 'cn', + 'ldap-group-name-attribute': 'cn', + 'ldap-group-member-attribute': 'memberUid', + 'ldap-group-members-are-ids': True, + 'ldap-config-flags': '{group_objectclass: "posixGroup",' + ' use_pool: True,' + ' group_tree_dn: "group_tree_dn_foobar"}', + } + + def test_200_config_flags_precedence(self): + """Validates precedence when the same config options are used.""" + application_name = 'keystone-ldap' + intended_cfg = self._get_ldap_config() + current_cfg, non_string_cfg = ( + self.config_current_separate_non_string_type_keys( + self.non_string_type_keys, intended_cfg, application_name) + ) + + with self.config_change( + {}, + non_string_cfg, + application_name=application_name, + reset_to_charm_default=True): + with self.config_change( + current_cfg, + intended_cfg, + application_name=application_name): + logging.info( + 'Performing LDAP settings validation in keystone.conf...' + ) + test_config = lifecycle_utils.get_charm_config(fatal=False) + zaza.model.wait_for_application_states( + states=test_config.get("target_deploy_status", {}) + ) + units = zaza.model.get_units("keystone-ldap", + model_name=self.model_name) + result = zaza.model.run_on_unit( + units[0].name, + "cat /etc/keystone/domains/keystone.userdomain.conf") + # not present in charm config, but present in config flags + self.assertIn("use_pool = True", result['stdout'], + "use_pool value is expected to be present and " + "set to True in the config file") + # ldap-config-flags overriding empty charm config value + self.assertIn("group_objectclass = posixGroup", + result['stdout'], + "group_objectclass is expected to be present and" + " set to posixGroup in the config file") + # overridden by charm config, not written to file + self.assertNotIn( + "group_tree_dn_foobar", + result['stdout'], + "user_tree_dn ldap-config-flags value needs to be " + "overridden by ldap-user-tree-dn in config file") + # complementing the above, value used is from charm setting + self.assertIn("group_tree_dn = ou=groups", result['stdout'], + "user_tree_dn value is expected to be present " + "and set to dc=test,dc=com in the config file") diff --git a/zaza/openstack/charm_tests/magpie/__init__.py b/zaza/openstack/charm_tests/magpie/__init__.py new file mode 100644 index 0000000..7fd0805 --- /dev/null +++ b/zaza/openstack/charm_tests/magpie/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing Magpie.""" diff --git a/zaza/openstack/charm_tests/magpie/tests.py b/zaza/openstack/charm_tests/magpie/tests.py new file mode 100644 index 0000000..0ac25a6 --- /dev/null +++ b/zaza/openstack/charm_tests/magpie/tests.py @@ -0,0 +1,82 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate Magpie testing.""" + +import logging + +import zaza + +import zaza.model +import zaza.openstack.charm_tests.test_utils as test_utils + + +class MagpieTest(test_utils.BaseCharmTest): + """Base Magpie tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for Magpie charm operation tests.""" + super(MagpieTest, cls).setUpClass() + unit_names = sorted( + [i.entity_id + for i in zaza.model.get_units('magpie')]) + cls.test_unit_0 = unit_names[0] + cls.test_unit_1 = unit_names[1] + + def test_break_dns_single(self): + """Check DNS failure is reflected in workload status.""" + zaza.model.run_on_unit( + self.test_unit_0, + 'mv /etc/resolv.conf /etc/resolv.conf.bak') + zaza.model.run_on_unit( + self.test_unit_0, + './hooks/update-status') + zaza.model.block_until_unit_wl_message_match( + self.test_unit_0, + '.*rev dns failed.*') + logging.info('Restoring /etc/resolv.conf') + zaza.model.run_on_unit( + self.test_unit_0, + 'mv /etc/resolv.conf.bak /etc/resolv.conf') + logging.info('Updating status') + zaza.model.run_on_unit( + self.test_unit_0, + './hooks/update-status') + + def test_break_ping_single(self): + """Check ping failure is reflected in workload status.""" + icmp = "iptables {} INPUT -p icmp --icmp-type echo-request -j REJECT" + logging.info('Blocking ping on {}'.format(self.test_unit_1)) + zaza.model.run_on_unit( + self.test_unit_1, + icmp.format('--append')) + zaza.model.run_on_unit( + self.test_unit_0, + './hooks/update-status') + logging.info('Checking status on {}'.format(self.test_unit_0)) + zaza.model.block_until_unit_wl_message_match( + self.test_unit_0, + '.*icmp failed.*') + logging.info('Allowing ping on {}'.format(self.test_unit_1)) + zaza.model.run_on_unit( + self.test_unit_1, + icmp.format('--delete')) + zaza.model.run_on_unit( + self.test_unit_0, + './hooks/update-status') + logging.info('Checking status on {}'.format(self.test_unit_0)) + zaza.model.block_until_unit_wl_message_match( + self.test_unit_0, + '.*icmp ok.*') diff --git a/zaza/openstack/charm_tests/manila/__init__.py b/zaza/openstack/charm_tests/manila/__init__.py new file mode 100644 index 0000000..064c54c --- /dev/null +++ b/zaza/openstack/charm_tests/manila/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Encapsulate Manila setup and testing.""" diff --git a/zaza/openstack/charm_tests/manila/tests.py b/zaza/openstack/charm_tests/manila/tests.py new file mode 100644 index 0000000..8381f2f --- /dev/null +++ b/zaza/openstack/charm_tests/manila/tests.py @@ -0,0 +1,376 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate Manila testing.""" + +import logging +import tenacity + +from manilaclient import client as manilaclient + +import zaza.model +import zaza.openstack.configure.guest as guest +import zaza.openstack.utilities.openstack as openstack_utils +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.charm_tests.nova.utils as nova_utils +import zaza.openstack.charm_tests.neutron.tests as neutron_tests + + +def verify_status(stdin, stdout, stderr): + """Callable to verify the command output. + + It checks if the command successfully executed. + + This is meant to be given as parameter 'verify' to the helper function + 'openstack_utils.ssh_command'. + """ + status = stdout.channel.recv_exit_status() + if status: + logging.info("{}".format(stderr.readlines()[0].strip())) + assert status == 0 + + +def verify_manila_testing_file(stdin, stdout, stderr): + """Callable to verify the command output. + + It checks if the command successfully executed, and it validates the + testing file written on the Manila share. + + This is meant to be given as parameter 'verify' to the helper function + 'openstack_utils.ssh_command'. + """ + verify_status(stdin, stdout, stderr) + out = "" + for line in iter(stdout.readline, ""): + out += line + assert out == "test\n" + + +class ManilaTests(test_utils.OpenStackBaseTest): + """Encapsulate Manila tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(ManilaTests, cls).setUpClass() + cls.manila_client = manilaclient.Client( + session=cls.keystone_session, client_version='2') + + def test_manila_api(self): + """Test that the Manila API is working.""" + # The manila charm contains a 'band-aid' for Bug #1706699 which relies + # on update-status to bring up services if needed. When the tests run + # an update-status hook might not have run so services may still be + # stopped so force a hook execution. + for unit in zaza.model.get_units('manila'): + zaza.model.run_on_unit(unit.entity_id, "hooks/update-status") + self.assertEqual([], self._list_shares()) + + @tenacity.retry( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)) + def _list_shares(self): + return self.manila_client.shares.list() + + +class ManilaBaseTest(test_utils.OpenStackBaseTest): + """Encapsulate a Manila basic functionality test.""" + + RESOURCE_PREFIX = 'zaza-manilatests' + INSTANCE_KEY = 'bionic' + INSTANCE_USERDATA = """#cloud-config +packages: +- nfs-common +""" + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(ManilaBaseTest, cls).setUpClass() + cls.nova_client = openstack_utils.get_nova_session_client( + session=cls.keystone_session) + cls.manila_client = manilaclient.Client( + session=cls.keystone_session, client_version='2') + cls.share_name = 'test-manila-share' + cls.share_type_name = 'default_share_type' + cls.share_protocol = 'nfs' + cls.mount_dir = '/mnt/manila_share' + cls.share_network = None + + @classmethod + def tearDownClass(cls): + """Run class teardown after tests finished.""" + # Cleanup Nova servers + logging.info('Cleaning up test Nova servers') + fips_reservations = [] + for vm in cls.nova_client.servers.list(): + fips_reservations += neutron_tests.floating_ips_from_instance(vm) + vm.delete() + openstack_utils.resource_removed( + cls.nova_client.servers, + vm.id, + msg="Waiting for the Nova VM {} to be deleted".format(vm.name)) + + # Delete FiPs reservations + logging.info('Cleaning up test FiPs reservations') + neutron = openstack_utils.get_neutron_session_client( + session=cls.keystone_session) + for fip in neutron.list_floatingips()['floatingips']: + if fip['floating_ip_address'] in fips_reservations: + neutron.delete_floatingip(fip['id']) + + # Cleanup Manila shares + logging.info('Cleaning up test shares') + for share in cls.manila_client.shares.list(): + share.delete() + openstack_utils.resource_removed( + cls.manila_client.shares, + share.id, + msg="Waiting for the Manila share {} to be deleted".format( + share.name)) + + # Cleanup test Manila share servers (spawned by the driver when DHSS + # is enabled). + logging.info('Cleaning up test shares servers (if found)') + for server in cls.manila_client.share_servers.list(): + server.delete() + openstack_utils.resource_removed( + cls.manila_client.share_servers, + server.id, + msg="Waiting for the share server {} to be deleted".format( + server.id)) + + def _get_mount_options(self): + """Get the appropriate mount options used to mount the Manila share. + + :returns: The proper mount options flags for the share protocol. + :rtype: string + """ + if self.share_protocol == 'nfs': + return 'nfsvers=4.1,proto=tcp' + else: + raise NotImplementedError( + 'Share protocol not supported yet: {}'.format( + self.share_protocol)) + + def _mount_share_on_instance(self, instance_ip, ssh_user_name, + ssh_private_key, share_path): + """Mount a share into a Nova instance. + + The mount command is executed via SSH. + + :param instance_ip: IP of the Nova instance. + :type instance_ip: string + :param ssh_user_name: SSH user name. + :type ssh_user_name: string + :param ssh_private_key: SSH private key. + :type ssh_private_key: string + :param share_path: Share network path. + :type share_path: string + """ + ssh_cmd = ( + 'sudo mkdir -p {0} && ' + 'sudo mount -t {1} -o {2} {3} {0}'.format( + self.mount_dir, + self.share_protocol, + self._get_mount_options(), + share_path)) + + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)): + with attempt: + openstack_utils.ssh_command( + vm_name="instance-{}".format(instance_ip), + ip=instance_ip, + username=ssh_user_name, + privkey=ssh_private_key, + command=ssh_cmd, + verify=verify_status) + + @tenacity.retry( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)) + def _write_testing_file_on_instance(self, instance_ip, ssh_user_name, + ssh_private_key): + """Write a file on a Manila share mounted into a Nova instance. + + Write a testing file into the already mounted Manila share from the + given Nova instance (which is meant to be validated from another + instance). These commands are executed via SSH. + + :param instance_ip: IP of the Nova instance. + :type instance_ip: string + :param ssh_user_name: SSH user name. + :type ssh_user_name: string + :param ssh_private_key: SSH private key. + :type ssh_private_key: string + """ + openstack_utils.ssh_command( + vm_name="instance-{}".format(instance_ip), + ip=instance_ip, + username=ssh_user_name, + privkey=ssh_private_key, + command='echo "test" | sudo tee {}/test'.format( + self.mount_dir), + verify=verify_status) + + @tenacity.retry( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)) + def _clear_testing_file_on_instance(self, instance_ip, ssh_user_name, + ssh_private_key): + """Clear a file on a Manila share mounted into a Nova instance. + + Remove a testing file into the already mounted Manila share from the + given Nova instance (which is meant to be validated from another + instance). These commands are executed via SSH. + + :param instance_ip: IP of the Nova instance. + :type instance_ip: string + :param ssh_user_name: SSH user name. + :type ssh_user_name: string + :param ssh_private_key: SSH private key. + :type ssh_private_key: string + """ + openstack_utils.ssh_command( + vm_name="instance-{}".format(instance_ip), + ip=instance_ip, + username=ssh_user_name, + privkey=ssh_private_key, + command='sudo rm {}/test'.format( + self.mount_dir), + verify=verify_status) + + @tenacity.retry( + stop=tenacity.stop_after_attempt(5), + wait=tenacity.wait_exponential(multiplier=3, min=2, max=10)) + def _validate_testing_file_from_instance(self, instance_ip, ssh_user_name, + ssh_private_key): + """Validate a file from the Manila share mounted into a Nova instance. + + This is meant to run after the testing file was already written into + another Nova instance. It validates the written file. The commands are + executed via SSH. + + :param instance_ip: IP of the Nova instance. + :type instance_ip: string + :param ssh_user_name: SSH user name. + :type ssh_user_name: string + :param ssh_private_key: SSH private key. + :type ssh_private_key: string + """ + openstack_utils.ssh_command( + vm_name="instance-{}".format(instance_ip), + ip=instance_ip, + username=ssh_user_name, + privkey=ssh_private_key, + command='sudo cat {}/test'.format(self.mount_dir), + verify=verify_manila_testing_file) + + def _restart_share_instance(self): + """Restart the share service's provider. + + restart_share_instance is intended to be overridden with driver + specific implementations that allow verrification that the share is + still accessible after the service is restarted. + + :returns bool: If the test should re-validate + :rtype: bool + """ + return False + + def test_manila_share(self): + """Test that a Manila share can be accessed on two instances. + + 1. Spawn two servers + 2. Create a share + 3. Mount it on both + 4. Write a file on one + 5. Read it on the other + 6. Profit + """ + # Spawn Servers + instance_1 = self.launch_guest( + guest_name='ins-1', + userdata=self.INSTANCE_USERDATA, + instance_key=self.INSTANCE_KEY) + instance_2 = self.launch_guest( + guest_name='ins-2', + userdata=self.INSTANCE_USERDATA, + instance_key=self.INSTANCE_KEY) + + fip_1 = neutron_tests.floating_ips_from_instance(instance_1)[0] + fip_2 = neutron_tests.floating_ips_from_instance(instance_2)[0] + + # Create a share + share = self.manila_client.shares.create( + share_type=self.share_type_name, + name=self.share_name, + share_proto=self.share_protocol, + share_network=self.share_network, + size=1) + + # Wait for the created share to become available before it gets used. + openstack_utils.resource_reaches_status( + self.manila_client.shares, + share.id, + wait_iteration_max_time=120, + stop_after_attempt=2, + expected_status="available", + msg="Waiting for a share to become available") + + # Grant access to the Manila share for both Nova instances. + share.allow(access_type='ip', access=fip_1, access_level='rw') + share.allow(access_type='ip', access=fip_2, access_level='rw') + + ssh_user_name = guest.boot_tests[self.INSTANCE_KEY]['username'] + privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME) + share_path = share.export_locations[0] + + # Write a testing file on instance #1 + self._mount_share_on_instance( + fip_1, ssh_user_name, privkey, share_path) + self._write_testing_file_on_instance( + fip_1, ssh_user_name, privkey) + + # Validate the testing file from instance #2 + self._mount_share_on_instance( + fip_2, ssh_user_name, privkey, share_path) + self._validate_testing_file_from_instance( + fip_2, ssh_user_name, privkey) + + # Restart the share provider + if self._restart_share_instance(): + logging.info("Verifying manila after restarting share instance") + # Read the previous testing file from instance #1 + self._mount_share_on_instance( + fip_1, ssh_user_name, privkey, share_path) + self._validate_testing_file_from_instance( + fip_1, ssh_user_name, privkey) + # Read the previous testing file from instance #1 + self._mount_share_on_instance( + fip_2, ssh_user_name, privkey, share_path) + # Reset the test! + self._clear_testing_file_on_instance( + fip_1, ssh_user_name, privkey + ) + # Write a testing file on instance #1 + self._write_testing_file_on_instance( + fip_1, ssh_user_name, privkey) + # Validate the testing file from instance #2 + self._validate_testing_file_from_instance( + fip_2, ssh_user_name, privkey) diff --git a/zaza/openstack/charm_tests/manila_ganesha/__init__.py b/zaza/openstack/charm_tests/manila_ganesha/__init__.py new file mode 100644 index 0000000..d9238d6 --- /dev/null +++ b/zaza/openstack/charm_tests/manila_ganesha/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Encapsulate Manila Ganesha setup and testing.""" diff --git a/zaza/openstack/charm_tests/manila_ganesha/setup.py b/zaza/openstack/charm_tests/manila_ganesha/setup.py new file mode 100644 index 0000000..a804958 --- /dev/null +++ b/zaza/openstack/charm_tests/manila_ganesha/setup.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Encapsulate Manila Ganesha setup.""" + + +import zaza.openstack.utilities.openstack as openstack_utils + +from manilaclient import client as manilaclient + + +MANILA_GANESHA_TYPE_NAME = "cephfsnfstype" + + +def setup_ganesha_share_type(manila_client=None): + """Create a share type for manila with Ganesha. + + :param manila_client: Authenticated manilaclient + :type manila_client: manilaclient.Client + """ + if manila_client is None: + keystone_session = openstack_utils.get_overcloud_keystone_session() + manila_client = manilaclient.Client( + session=keystone_session, client_version='2') + + manila_client.share_types.create( + name=MANILA_GANESHA_TYPE_NAME, spec_driver_handles_share_servers=False, + extra_specs={ + 'vendor_name': 'Ceph', + 'storage_protocol': 'NFS', + 'snapshot_support': False, + }) diff --git a/zaza/openstack/charm_tests/manila_ganesha/tests.py b/zaza/openstack/charm_tests/manila_ganesha/tests.py new file mode 100644 index 0000000..2e9a186 --- /dev/null +++ b/zaza/openstack/charm_tests/manila_ganesha/tests.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate Manila Ganesha testing.""" + +import logging + +from zaza.openstack.charm_tests.manila_ganesha.setup import ( + MANILA_GANESHA_TYPE_NAME, +) + +import zaza.openstack.charm_tests.manila.tests as manila_tests +import zaza.model + + +class ManilaGaneshaTests(manila_tests.ManilaBaseTest): + """Encapsulate Manila Ganesha tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(ManilaGaneshaTests, cls).setUpClass() + cls.share_name = 'cephnfsshare1' + cls.share_type_name = MANILA_GANESHA_TYPE_NAME + cls.share_protocol = 'nfs' + + def _restart_share_instance(self): + logging.info('Restarting manila-share and nfs-ganesha') + # It would be better for thie to derive the application name, + # manila-ganesha-az1, from deployed instances fo the manila-ganesha + # charm; however, that functionality isn't present yet in zaza, so + # this is hard coded to the application name used in that charm's + # test bundles. + for unit in zaza.model.get_units('manila-ganesha-az1'): + # While we really only need to run this on the machine hosting + # nfs-ganesha and manila-share, running it everywhere isn't + # harmful. Pacemaker handles restarting the services + zaza.model.run_on_unit( + unit.entity_id, + "systemctl stop manila-share nfs-ganesha") + return True diff --git a/zaza/openstack/charm_tests/manila_netapp/__init__.py b/zaza/openstack/charm_tests/manila_netapp/__init__.py new file mode 100644 index 0000000..f6e2865 --- /dev/null +++ b/zaza/openstack/charm_tests/manila_netapp/__init__.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 + +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate Manila NetApp setup and testing.""" diff --git a/zaza/openstack/charm_tests/manila_netapp/setup.py b/zaza/openstack/charm_tests/manila_netapp/setup.py new file mode 100644 index 0000000..fa1a671 --- /dev/null +++ b/zaza/openstack/charm_tests/manila_netapp/setup.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 + +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate Manila NetApp setup.""" + +import zaza.openstack.utilities.openstack as openstack_utils +import zaza.openstack.charm_tests.neutron.setup as neutron_setup + + +MANILA_NETAPP_TYPE_NAME = "netapp-ontap" +MANILA_NETAPP_BACKEND_NAME = "netapp-ontap" + +MANILA_NETAPP_DHSS_TYPE_NAME = "netapp-ontap-dhss" +MANILA_NETAPP_DHSS_BACKEND_NAME = "netapp-ontap-dhss" + +MANILA_NETAPP_SHARE_NET_NAME = "netapp-ontap-share-network" + + +def create_netapp_share_type(manila_client=None): + """Create a share type for Manila with NetApp Data ONTAP driver. + + :param manila_client: Authenticated manilaclient + :type manila_client: manilaclient.Client + """ + if manila_client is None: + manila_client = openstack_utils.get_manila_session_client( + openstack_utils.get_overcloud_keystone_session()) + + manila_client.share_types.create( + name=MANILA_NETAPP_TYPE_NAME, + spec_driver_handles_share_servers=False, + extra_specs={ + 'vendor_name': 'NetApp', + 'share_backend_name': MANILA_NETAPP_BACKEND_NAME, + 'storage_protocol': 'NFS_CIFS', + }) + + +def create_netapp_dhss_share_type(manila_client=None): + """Create a DHSS share type for Manila with NetApp Data ONTAP driver. + + :param manila_client: Authenticated manilaclient + :type manila_client: manilaclient.Client + """ + if manila_client is None: + manila_client = openstack_utils.get_manila_session_client( + openstack_utils.get_overcloud_keystone_session()) + + manila_client.share_types.create( + name=MANILA_NETAPP_DHSS_TYPE_NAME, + spec_driver_handles_share_servers=True, + extra_specs={ + 'vendor_name': 'NetApp', + 'share_backend_name': MANILA_NETAPP_DHSS_BACKEND_NAME, + 'storage_protocol': 'NFS_CIFS', + }) + + +def create_netapp_share_network(manila_client=None): + """Create a Manila share network from the existing provider network. + + This setup function assumes that 'neutron.setup.basic_overcloud_network' + is called to have the proper tenant networks setup. + + The share network will be bound to the provider network configured by + 'neutron.setup.basic_overcloud_network'. + """ + session = openstack_utils.get_overcloud_keystone_session() + if manila_client is None: + manila_client = openstack_utils.get_manila_session_client(session) + + neutron = openstack_utils.get_neutron_session_client(session) + external_net = neutron.find_resource( + 'network', + neutron_setup.OVERCLOUD_NETWORK_CONFIG['external_net_name']) + external_subnet = neutron.find_resource( + 'subnet', + neutron_setup.OVERCLOUD_NETWORK_CONFIG['external_subnet_name']) + + manila_client.share_networks.create( + name=MANILA_NETAPP_SHARE_NET_NAME, + neutron_net_id=external_net['id'], + neutron_subnet_id=external_subnet['id']) diff --git a/zaza/openstack/charm_tests/manila_netapp/tests.py b/zaza/openstack/charm_tests/manila_netapp/tests.py new file mode 100644 index 0000000..f9178ba --- /dev/null +++ b/zaza/openstack/charm_tests/manila_netapp/tests.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 + +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate Manila NetApp testing.""" + +from zaza.openstack.charm_tests.manila_netapp.setup import ( + MANILA_NETAPP_TYPE_NAME, + MANILA_NETAPP_DHSS_TYPE_NAME, + MANILA_NETAPP_SHARE_NET_NAME, +) + +import zaza.openstack.charm_tests.manila.tests as manila_tests + + +class ManilaNetAppNFSTest(manila_tests.ManilaBaseTest): + """Encapsulate Manila NetApp NFS test.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(ManilaNetAppNFSTest, cls).setUpClass() + cls.share_name = 'netapp-ontap-share' + cls.share_type_name = MANILA_NETAPP_TYPE_NAME + cls.share_protocol = 'nfs' + + +class ManilaNetAppDHSSNFSTest(manila_tests.ManilaBaseTest): + """Encapsulate Manila NetApp NFS test.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(ManilaNetAppDHSSNFSTest, cls).setUpClass() + cls.share_name = 'netapp-ontap-dhss-share' + cls.share_type_name = MANILA_NETAPP_DHSS_TYPE_NAME + cls.share_protocol = 'nfs' + cls.share_network = cls.manila_client.share_networks.find( + name=MANILA_NETAPP_SHARE_NET_NAME) diff --git a/zaza/openstack/charm_tests/masakari/tests.py b/zaza/openstack/charm_tests/masakari/tests.py index 274119a..6f27d17 100644 --- a/zaza/openstack/charm_tests/masakari/tests.py +++ b/zaza/openstack/charm_tests/masakari/tests.py @@ -18,6 +18,7 @@ from datetime import datetime import logging +import unittest import tenacity import novaclient @@ -37,7 +38,8 @@ class MasakariTest(test_utils.OpenStackBaseTest): @classmethod def setUpClass(cls): """Run class setup for running tests.""" - super(MasakariTest, cls).setUpClass() + super(MasakariTest, cls).setUpClass(application_name="masakari") + cls.current_release = openstack_utils.get_os_release() cls.keystone_session = openstack_utils.get_overcloud_keystone_session() cls.model_name = zaza.model.get_juju_model() cls.nova_client = openstack_utils.get_nova_session_client( @@ -132,8 +134,32 @@ class MasakariTest(test_utils.OpenStackBaseTest): vm_uuid, model_name=self.model_name) + @tenacity.retry(wait=tenacity.wait_exponential(multiplier=2, max=60), + reraise=True, stop=tenacity.stop_after_attempt(5), + retry=tenacity.retry_if_exception_type(AssertionError)) + def wait_for_guest_ready(self, vm_name): + """Wait for the guest to be ready. + + :param vm_name: Name of guest to check. + :type vm_name: str + """ + guest_ready_attr_checks = [ + ('OS-EXT-STS:task_state', None), + ('status', 'ACTIVE'), + ('OS-EXT-STS:power_state', 1), + ('OS-EXT-STS:vm_state', 'active')] + guest = self.nova_client.servers.find(name=vm_name) + logging.info('Checking guest {} attributes'.format(vm_name)) + for (attr, required_state) in guest_ready_attr_checks: + logging.info('Checking {} is {}'.format(attr, required_state)) + assert getattr(guest, attr) == required_state + def test_instance_failover(self): """Test masakari managed guest migration.""" + # Workaround for Bug #1874719 + zaza.openstack.configure.hacluster.remove_node( + 'masakari', + 'node1') # Launch guest self.assertTrue( zaza.openstack.configure.hacluster.check_all_nodes_online( @@ -162,11 +188,18 @@ class MasakariTest(test_utils.OpenStackBaseTest): model_name=self.model_name) openstack_utils.enable_all_nova_services(self.nova_client) zaza.openstack.configure.masakari.enable_hosts() + self.wait_for_guest_ready(vm_name) def test_instance_restart_on_fail(self): - """Test singlee guest crash and recovery.""" + """Test single guest crash and recovery.""" + if self.current_release < openstack_utils.get_os_release( + 'bionic_ussuri'): + raise unittest.SkipTest( + "Not supported on {}. Bug #1866638".format( + self.current_release)) vm_name = 'zaza-test-instance-failover' vm = self.ensure_guest(vm_name) + self.wait_for_guest_ready(vm_name) _, unit_name = self.get_guests_compute_info(vm_name) logging.info('{} is running on {}'.format(vm_name, unit_name)) guest_pid = self.get_guest_qemu_pid( @@ -192,6 +225,6 @@ class MasakariTest(test_utils.OpenStackBaseTest): unit_name, vm.id, model_name=self.model_name) - logging.info('{} pid is now {}'.format(vm_name, guest_pid)) + logging.info('{} pid is now {}'.format(vm_name, new_guest_pid)) assert new_guest_pid and new_guest_pid != guest_pid, ( "Restart failed or never happened") diff --git a/zaza/openstack/charm_tests/mysql/tests.py b/zaza/openstack/charm_tests/mysql/tests.py index 32305fe..5fbfb7e 100644 --- a/zaza/openstack/charm_tests/mysql/tests.py +++ b/zaza/openstack/charm_tests/mysql/tests.py @@ -14,10 +14,11 @@ """MySQL/Percona Cluster Testing.""" +import json import logging import os import re - +import tempfile import tenacity import zaza.charm_lifecycle.utils as lifecycle_utils @@ -28,31 +29,20 @@ import zaza.openstack.utilities.openstack as openstack_utils import zaza.openstack.utilities.generic as generic_utils -class MySQLTest(test_utils.OpenStackBaseTest): +PXC_SEEDED_FILE = "/var/lib/percona-xtradb-cluster/seeded" + + +class MySQLBaseTest(test_utils.OpenStackBaseTest): """Base for mysql charm tests.""" @classmethod - def setUpClass(cls): + def setUpClass(cls, application_name=None): """Run class setup for running mysql tests.""" - super(MySQLTest, cls).setUpClass() + super().setUpClass(application_name=application_name) cls.application = "mysql" cls.services = ["mysqld"] - - -class PerconaClusterTest(test_utils.OpenStackBaseTest): - """Base for percona-cluster charm tests.""" - - @classmethod - def setUpClass(cls): - """Run class setup for running percona-cluster tests.""" - super(PerconaClusterTest, cls).setUpClass() - cls.application = "percona-cluster" - # This is the service pidof will attempt to find - # rather than what systemctl uses - cls.services = ["mysqld"] - cls.vip = os.environ.get("OS_VIP00") - cls.leader = None - cls.non_leaders = [] + # Config file affected by juju set config change + cls.conf_file = "/etc/mysql/mysql.conf.d/mysqld.cnf" def get_root_password(self): """Get the MySQL root password. @@ -64,6 +54,198 @@ class PerconaClusterTest(test_utils.OpenStackBaseTest): self.application, "leader-get root-password")["Stdout"].strip() + def get_leaders_and_non_leaders(self): + """Get leader node and non-leader nodes of percona. + + Update and set on the object the leader node and list of non-leader + nodes. + + :returns: None + :rtype: None + """ + status = zaza.model.get_status().applications[self.application] + # Reset + self.leader = None + self.non_leaders = [] + for unit in status["units"]: + if status["units"][unit].get("leader"): + self.leader = unit + else: + self.non_leaders.append(unit) + return self.leader, self.non_leaders + + def get_cluster_status(self): + """Get cluster status. + + Return cluster status dict from the cluster-status action or raise + assertion error. + + :returns: Dictionary of cluster status + :rtype: dict + """ + logging.info("Running cluster-status action") + action = zaza.model.run_action_on_leader( + self.application, + "cluster-status", + action_params={}) + assert action.data.get("results") is not None, ( + "Cluster status action failed: No results: {}" + .format(action.data)) + assert action.data["results"].get("cluster-status") is not None, ( + "Cluster status action failed: No cluster-status: {}" + .format(action.data)) + return json.loads(action.data["results"]["cluster-status"]) + + def get_rw_primary_node(self): + """Get RW primary node. + + Return RW primary node unit. + + :returns: Unit object of primary node + :rtype: Union[Unit, None] + """ + _status = self.get_cluster_status() + _primary_ip = _status['groupInformationSourceMember'] + if ":" in _primary_ip: + _primary_ip = _primary_ip.split(':')[0] + units = zaza.model.get_units(self.application_name) + for unit in units: + if _primary_ip in unit.public_address: + return unit + + def get_blocked_mysql_routers(self): + """Get blocked mysql routers. + + :returns: List of blocked mysql-router unit names + :rtype: List[str] + """ + # Make sure mysql-router units are up to date + # We cannot assume they are as there is up to a five minute delay + mysql_router_units = [] + for application in self.get_applications_with_substring_in_name( + "mysql-router"): + for unit in zaza.model.get_units(application): + mysql_router_units.append(unit.entity_id) + self.run_update_status_hooks(mysql_router_units) + + # Get up to date status + status = zaza.model.get_status().applications + blocked_mysql_routers = [] + # Check if the units are blocked + for application in self.get_applications_with_substring_in_name( + "mysql-router"): + # Subordinate dance with primary + # There is no satus[applicatoin]["units"] for subordinates + _subordinate_to = status[application].subordinate_to[0] + for appunit in status[_subordinate_to].units: + for subunit in ( + status[_subordinate_to]. + units[appunit].subordinates.keys()): + if "blocked" in ( + status[_subordinate_to].units[appunit]. + subordinates[subunit].workload_status.status): + blocked_mysql_routers.append(subunit) + return blocked_mysql_routers + + def restart_blocked_mysql_routers(self): + """Restart blocked mysql routers. + + :returns: None + :rtype: None + """ + # Check for blocked mysql-router units + blocked_mysql_routers = self.get_blocked_mysql_routers() + for unit in blocked_mysql_routers: + logging.warning( + "Restarting blocked mysql-router unit {}" + .format(unit)) + zaza.model.run_on_unit( + unit, + "systemctl restart {}".format(unit.rpartition("/")[0])) + + +class MySQLCommonTests(MySQLBaseTest): + """Common mysql charm tests.""" + + def test_110_mysqldump(self): + """Backup mysql. + + Run the mysqldump action. + """ + _db = "keystone" + _file_key = "mysqldump-file" + logging.info("Execute mysqldump action") + # Need to change strict mode to be able to dump database + if self.application_name == "percona-cluster": + action = zaza.model.run_action_on_leader( + self.application_name, + "set-pxc-strict-mode", + action_params={"mode": "MASTER"}) + + action = zaza.model.run_action_on_leader( + self.application, + "mysqldump", + action_params={"databases": _db}) + _results = action.data["results"] + assert _db in _results[_file_key], ( + "Mysqldump action failed: {}".format(action.data)) + logging.info("Passed mysqldump action test.") + + def test_910_restart_on_config_change(self): + """Checking restart happens on config change. + + Change max connections and assert that change propagates to the correct + file and that services are restarted as a result + """ + # Expected default and alternate values + set_default = {"max-connections": "600"} + set_alternate = {"max-connections": "1000"} + + # Make config change, check for service restarts + logging.info("Setting max connections ...") + self.restart_on_changed( + self.conf_file, + set_default, + set_alternate, + {}, {}, + self.services) + logging.info("Passed restart on changed test.") + + def test_920_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + """ + with self.pause_resume(self.services): + logging.info("Testing pause resume") + + logging.info("Wait till model is idle ...") + zaza.model.block_until_all_units_idle() + + # If there are any blocekd mysql routers restart them. + self.restart_blocked_mysql_routers() + assert not self.get_blocked_mysql_routers(), ( + "Should no longer be blocked mysql-router units") + + logging.info("Passed pause and resume test.") + + +class PerconaClusterBaseTest(MySQLBaseTest): + """Base for percona-cluster charm tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running percona-cluster tests.""" + super().setUpClass() + cls.application = "percona-cluster" + # This is the service pidof will attempt to find + # rather than what systemctl uses + cls.services = ["mysqld"] + cls.vip = os.environ.get("TEST_VIP00") + # Config file affected by juju set config change + cls.conf_file = "/etc/mysql/percona-xtradb-cluster.conf.d/mysqld.cnf" + def get_wsrep_value(self, attr): """Get wsrrep value from the DB. @@ -78,7 +260,7 @@ class PerconaClusterTest(test_utils.OpenStackBaseTest): output = zaza.model.run_on_leader( self.application, cmd)["Stdout"].strip() value = re.search(r"^.+?\s+(.+)", output).group(1) - logging.debug("%s = %s" % (attr, value)) + logging.info("%s = %s" % (attr, value)) return value def is_pxc_bootstrapped(self): @@ -116,7 +298,7 @@ class PerconaClusterTest(test_utils.OpenStackBaseTest): cmd = "ip -br addr" result = zaza.model.run_on_unit(unit.entity_id, cmd) output = result.get("Stdout").strip() - logging.debug(output) + logging.info(output) if self.vip in output: logging.info("vip ({}) running in {}".format( self.vip, @@ -124,39 +306,13 @@ class PerconaClusterTest(test_utils.OpenStackBaseTest): ) return unit.entity_id - def update_leaders_and_non_leaders(self): - """Get leader node and non-leader nodes of percona. - Update and set on the object the leader node and list of non-leader - nodes. - - :returns: None - :rtype: None - """ - status = zaza.model.get_status().applications[self.application] - # Reset - self.leader = None - self.non_leaders = [] - for unit in status["units"]: - if status["units"][unit].get("leader"): - self.leader = unit - else: - self.non_leaders.append(unit) - - -class PerconaClusterCharmTests(PerconaClusterTest): - """Base for percona-cluster charm tests. +class PerconaClusterCharmTests(MySQLCommonTests, PerconaClusterBaseTest): + """Percona-cluster charm tests. .. note:: these have tests have been ported from amulet tests """ - @classmethod - def setUpClass(cls): - """Run class setup for running percona-cluster tests.""" - super(PerconaClusterTest, cls).setUpClass() - cls.application = "percona-cluster" - cls.services = ["mysqld"] - def test_100_bootstrapped_and_clustered(self): """Ensure PXC is bootstrapped and that peer units are clustered.""" self.units = zaza.model.get_application_config( @@ -171,37 +327,9 @@ class PerconaClusterCharmTests(PerconaClusterTest): " (wanted=%s, cluster_size=%s)" % (self.units, cluster_size)) assert cluster_size >= self.units, msg - def test_110_restart_on_config_change(self): - """Checking restart happens on config change. - - Change disk format and assert then change propagates to the correct - file and that services are restarted as a result - """ - # Expected default and alternate values - set_default = {"peer-timeout": "PT3S"} - set_alternate = {"peer-timeout": "PT15S"} - - # Config file affected by juju set config change - conf_file = "/etc/mysql/percona-xtradb-cluster.conf.d/mysqld.cnf" - - # Make config change, check for service restarts - logging.debug("Setting peer timeout ...") - self.restart_on_changed( - conf_file, - set_default, - set_alternate, - {}, {}, - self.services) - logging.info("Passed restart on changed") - - def test_120_pause_resume(self): - """Run pause and resume tests. - - Pause service and check services are stopped then resume and check - they are started - """ - with self.pause_resume(self.services): - logging.info("Testing pause resume") + logging.info("Ensuring PXC seeded file is present") + zaza.model.block_until_file_has_contents(self.application, + PXC_SEEDED_FILE, "done") def test_130_change_root_password(self): """Change root password. @@ -233,7 +361,7 @@ class PerconaClusterCharmTests(PerconaClusterTest): assert code == "0", output -class PerconaClusterColdStartTest(PerconaClusterTest): +class PerconaClusterColdStartTest(PerconaClusterBaseTest): """Percona Cluster cold start tests.""" @classmethod @@ -244,8 +372,6 @@ class PerconaClusterColdStartTest(PerconaClusterTest): openstack_utils.get_undercloud_keystone_session()) cls.nova_client = openstack_utils.get_nova_session_client( cls.overcloud_keystone_session) - cls.machines = ( - juju_utils.get_machine_uuids_for_application(cls.application)) def resolve_update_status_errors(self): """Resolve update-status hooks error. @@ -269,25 +395,26 @@ class PerconaClusterColdStartTest(PerconaClusterTest): After bootstrapping a non-leader node, notify bootstrapped on the leader node. """ + _machines = sorted( + juju_utils.get_machine_uuids_for_application(self.application)) # Stop Nodes - self.machines.sort() # Avoid hitting an update-status hook - logging.debug("Wait till model is idle ...") + logging.info("Wait till model is idle ...") zaza.model.block_until_all_units_idle() - logging.info("Stopping instances: {}".format(self.machines)) - for uuid in self.machines: + logging.info("Stopping instances: {}".format(_machines)) + for uuid in _machines: self.nova_client.servers.stop(uuid) - logging.debug("Wait till all machines are shutoff ...") - for uuid in self.machines: + logging.info("Wait till all machines are shutoff ...") + for uuid in _machines: openstack_utils.resource_reaches_status(self.nova_client.servers, uuid, expected_status='SHUTOFF', stop_after_attempt=16) # Start nodes - self.machines.sort(reverse=True) - logging.info("Starting instances: {}".format(self.machines)) - for uuid in self.machines: + _machines.sort(reverse=True) + logging.info("Starting instances: {}".format(_machines)) + for uuid in _machines: self.nova_client.servers.start(uuid) for unit in zaza.model.get_units(self.application): @@ -296,7 +423,7 @@ class PerconaClusterColdStartTest(PerconaClusterTest): 'unknown', negate_match=True) - logging.debug("Wait till model is idle ...") + logging.info("Wait till model is idle ...") # XXX If a hook was executing on a unit when it was powered off # it comes back in an error state. try: @@ -305,7 +432,7 @@ class PerconaClusterColdStartTest(PerconaClusterTest): self.resolve_update_status_errors() zaza.model.block_until_all_units_idle() - logging.debug("Wait for application states ...") + logging.info("Wait for application states ...") for unit in zaza.model.get_units(self.application): try: zaza.model.run_on_unit(unit.entity_id, "hooks/update-status") @@ -318,17 +445,17 @@ class PerconaClusterColdStartTest(PerconaClusterTest): zaza.model.wait_for_application_states(states=states) # Update which node is the leader and which are not - self.update_leaders_and_non_leaders() + _leader, _non_leaders = self.get_leaders_and_non_leaders() # We want to test the worst possible scenario which is the # non-leader with the highest sequence number. We will use the leader # for the notify-bootstrapped after. They just need to be different # units. logging.info("Execute bootstrap-pxc action after cold boot ...") zaza.model.run_action( - self.non_leaders[0], + _non_leaders[0], "bootstrap-pxc", action_params={}) - logging.debug("Wait for application states ...") + logging.info("Wait for application states ...") for unit in zaza.model.get_units(self.application): zaza.model.run_on_unit(unit.entity_id, "hooks/update-status") states = {"percona-cluster": { @@ -342,10 +469,10 @@ class PerconaClusterColdStartTest(PerconaClusterTest): self.application, "notify-bootstrapped", action_params={}) - logging.debug("Wait for application states ...") + logging.info("Wait for application states ...") for unit in zaza.model.get_units(self.application): zaza.model.run_on_unit(unit.entity_id, "hooks/update-status") - test_config = lifecycle_utils.get_charm_config() + test_config = lifecycle_utils.get_charm_config(fatal=False) zaza.model.wait_for_application_states( states=test_config.get("target_deploy_status", {})) @@ -367,17 +494,9 @@ def retry_is_new_crm_master(test, old_crm_master): return False -class PerconaClusterScaleTests(PerconaClusterTest): +class PerconaClusterScaleTests(PerconaClusterBaseTest): """Percona Cluster scale tests.""" - @classmethod - def setUpClass(cls): - """Run class setup for running percona scale tests. - - .. note:: these have tests have been ported from amulet tests - """ - super(PerconaClusterScaleTests, cls).setUpClass() - def test_100_kill_crm_master(self): """Ensure VIP failover. @@ -403,3 +522,511 @@ class PerconaClusterScaleTests(PerconaClusterTest): # always true. assert generic_utils.is_port_open("3306", self.vip), \ "Cannot connect to vip" + + +class MySQLInnoDBClusterTests(MySQLCommonTests): + """Mysql-innodb-cluster charm tests. + + Note: The restart on changed and pause/resume tests also validate the + changing of the R/W primary. On each mysqld shutodown a new R/W primary is + elected automatically by MySQL. + """ + + @classmethod + def setUpClass(cls): + """Run class setup for running mysql-innodb-cluster tests.""" + super().setUpClass() + cls.application = "mysql-innodb-cluster" + + def test_100_cluster_status(self): + """Checking cluster status. + + Run the cluster-status action. + """ + logging.info("Execute cluster-status action") + cluster_status = self.get_cluster_status() + assert "OK" in cluster_status["defaultReplicaSet"]["status"], ( + "Cluster status is not OK: {}" + .format(cluster_status)) + logging.info("Passed cluster-status action test.") + + def test_120_set_cluster_option(self): + """Set cluster option. + + Run the set-cluster-option action. + """ + _key = "autoRejoinTries" + _value = "500" + logging.info("Set cluster option {}={}".format(_key, _value)) + action = zaza.model.run_action_on_leader( + self.application, + "set-cluster-option", + action_params={"key": _key, "value": _value}) + assert "Success" in action.data["results"]["outcome"], ( + "Set cluster option {}={} action failed: {}" + .format(_key, _value, action.data)) + logging.info("Passed set cluster option action test.") + + +class MySQLInnoDBClusterColdStartTest(MySQLBaseTest): + """Percona Cluster cold start tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running percona-cluster cold start tests.""" + super().setUpClass() + cls.application = "mysql-innodb-cluster" + cls.overcloud_keystone_session = ( + openstack_utils.get_undercloud_keystone_session()) + cls.nova_client = openstack_utils.get_nova_session_client( + cls.overcloud_keystone_session) + + def resolve_update_status_errors(self): + """Resolve update-status hooks error. + + This should *only* be used after an instance hard reboot to handle the + situation where a update-status hook was running when the unit was + rebooted. + """ + zaza.model.resolve_units( + application_name=self.application, + erred_hook='update-status', + wait=True, timeout=180) + + def test_100_reboot_cluster_from_complete_outage(self): + """Reboot cluster from complete outage. + + After a cold start, reboot cluster from complete outage. + """ + _machines = sorted( + juju_utils.get_machine_uuids_for_application(self.application)) + # Stop Nodes + # Avoid hitting an update-status hook + logging.info("Wait till model is idle ...") + zaza.model.block_until_all_units_idle() + logging.info("Stopping instances: {}".format(_machines)) + for uuid in _machines: + self.nova_client.servers.stop(uuid) + logging.info("Wait till all machines are shutoff ...") + for uuid in _machines: + openstack_utils.resource_reaches_status(self.nova_client.servers, + uuid, + expected_status='SHUTOFF', + stop_after_attempt=16) + + # Start nodes + _machines.sort(reverse=True) + logging.info("Starting instances: {}".format(_machines)) + for uuid in _machines: + self.nova_client.servers.start(uuid) + + logging.info( + "Wait till all {} units are in state 'unkown' ..." + .format(self.application)) + for unit in zaza.model.get_units(self.application): + zaza.model.block_until_unit_wl_status( + unit.entity_id, + 'unknown', + negate_match=True) + + logging.info("Wait till model is idle ...") + try: + zaza.model.block_until_all_units_idle() + except zaza.model.UnitError: + self.resolve_update_status_errors() + zaza.model.block_until_all_units_idle() + + logging.info("Clear error hooks after reboot ...") + for unit in zaza.model.get_units(self.application): + try: + zaza.model.run_on_unit(unit.entity_id, "hooks/update-status") + except zaza.model.UnitError: + self.resolve_update_status_errors() + zaza.model.run_on_unit(unit.entity_id, "hooks/update-status") + + logging.info( + "Wait till all {} units are in state 'blocked' ..." + .format(self.application)) + for unit in zaza.model.get_units(self.application): + zaza.model.block_until_unit_wl_status( + unit.entity_id, + 'blocked') + + # Wait until update-status hooks have completed + logging.info("Wait till model is idle ...") + zaza.model.block_until_all_units_idle() + + logging.info("Execute reboot-cluster-from-complete-outage " + "action after cold boot ...") + # We do not know which unit has the most up to date data + # run reboot-cluster-from-complete-outage until we get a success. + for unit in zaza.model.get_units(self.application): + action = zaza.model.run_action( + unit.entity_id, + "reboot-cluster-from-complete-outage", + action_params={}) + if "Success" in action.data.get("results", {}).get("outcome", ""): + break + else: + logging.info(action.data.get("results", {}).get("output", "")) + + assert "Success" in action.data["results"]["outcome"], ( + "Reboot cluster from complete outage action failed: {}" + .format(action.data)) + logging.info("Wait for application states ...") + for unit in zaza.model.get_units(self.application): + zaza.model.run_on_unit(unit.entity_id, "hooks/update-status") + test_config = lifecycle_utils.get_charm_config(fatal=False) + zaza.model.wait_for_application_states( + states=test_config.get("target_deploy_status", {})) + + +class MySQL8MigrationTests(MySQLBaseTest): + """Percona Cluster to MySQL InnoDB Cluster Tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running migration tests.""" + # Having application_name set avoids breakage in the + # OpenStackBaseTest class when running bundle tests without + # charm_name specified + super().setUpClass(application_name="mysql-innodb-cluster") + + def test_999_migrate_percona_to_mysql(self): + """Migrate DBs from percona-cluster to mysql-innodb-cluster. + + Do not rely on self.application_name or other pre-set class values as + we will be pointing to both percona-cluster and mysql-innodb-cluster. + """ + # Map application name to db name + apps_to_dbs = { + "keystone": ["keystone"], + "glance": ["glance"], + "cinder": ["cinder"], + "nova-cloud-controller": ["nova", "nova_api", "nova_cell0"], + "neutron-api": ["neutron"], + "openstack-dashboard": ["horizon"], + "placement": ["placement"], + "vault": ["vault"]} + # TODO: This could do an automated check of what is actually deployed + dbs = [db for mapped_dbs in apps_to_dbs.values() for db in mapped_dbs] + percona_application = "percona-cluster" + mysql_application = "mysql-innodb-cluster" + percona_leader = zaza.model.get_unit_from_name( + zaza.model.get_lead_unit_name(percona_application)) + mysql_leader = zaza.model.get_unit_from_name( + zaza.model.get_lead_unit_name(mysql_application)) + logging.info("Remove percona-cluster:shared-db relations ...") + for app in apps_to_dbs.keys(): + # Remove relations + zaza.model.remove_relation( + percona_application, + "{}:shared-db".format(percona_application), + "{}:shared-db".format(app)) + logging.info("Wait till model is idle ...") + zaza.model.block_until_all_units_idle() + # Set PXC Strict Mode to MASTER + logging.info("Set PXC Strict Mode MASTER ...") + action = zaza.model.run_action_on_leader( + percona_application, + "set-pxc-strict-mode", + action_params={"mode": "MASTER"}) + assert "failed" not in action.data["status"], ( + "Set PXC Strict Mode MASTER action failed: {}" + .format(action.data)) + # Dump the percona db + logging.info("mysqldump percona-cluster DBs ...") + action = zaza.model.run_action_on_leader( + percona_application, + "mysqldump", + action_params={ + "databases": ",".join(dbs)}) + assert "failed" not in action.data["status"], ( + "mysqldump action failed: {}" + .format(action.data)) + remote_file = action.data["results"]["mysqldump-file"] + remote_backup_dir = "/var/backups/mysql" + # Permissions for ubuntu user to read + logging.info("Set permissions to read percona-cluster:{} ..." + .format(remote_backup_dir)) + zaza.model.run_on_leader( + percona_application, + "chmod 755 {}".format(remote_backup_dir)) + + # SCP back and forth + dump_file = "dump.sql.gz" + logging.info("SCP percona-cluster:{} to mysql-innodb-cluster:{} ..." + .format(remote_file, dump_file)) + with tempfile.TemporaryDirectory() as tmpdirname: + tmp_file = "{}/{}".format(tmpdirname, dump_file) + zaza.model.scp_from_unit( + percona_leader.name, + remote_file, + tmp_file) + zaza.model.scp_to_unit( + mysql_leader.name, + tmp_file, + dump_file) + # Restore mysqldump to mysql-innodb-cluster + logging.info("restore-mysqldump DBs onto mysql-innodb-cluster ...") + action = zaza.model.run_action_on_leader( + mysql_application, + "restore-mysqldump", + action_params={ + "dump-file": "/home/ubuntu/{}".format(dump_file)}) + assert "failed" not in action.data["status"], ( + "restore-mysqldump action failed: {}" + .format(action.data)) + # Add db router relations + logging.info("Add mysql-router:shared-db relations ...") + for app in apps_to_dbs.keys(): + # add relations + zaza.model.add_relation( + mysql_application, + "{}:shared-db".format(app), + "{}-mysql-router:shared-db".format(app)) + # Set PXC Strict Mode back to ENFORCING + logging.info("Set PXC Strict Mode ENFORCING ...") + action = zaza.model.run_action_on_leader( + percona_application, + "set-pxc-strict-mode", + action_params={"mode": "ENFORCING"}) + assert "failed" not in action.data["status"], ( + "Set PXC Strict Mode ENFORCING action failed: {}" + .format(action.data)) + logging.info("Wait for application states ...") + test_config = lifecycle_utils.get_charm_config(fatal=False) + zaza.model.wait_for_application_states( + states=test_config.get("target_deploy_status", {})) + + +class MySQLInnoDBClusterScaleTest(MySQLBaseTest): + """Percona Cluster cold start tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running mysql-innodb-cluster scale tests.""" + super().setUpClass() + cls.application = "mysql-innodb-cluster" + cls.test_config = lifecycle_utils.get_charm_config(fatal=False) + cls.states = cls.test_config.get("target_deploy_status", {}) + + def test_800_remove_leader(self): + """Remove leader node. + + We start with a three node cluster, remove one, down to two. + The cluster will be in waiting state. + """ + logging.info("Scale in test: remove leader") + leader, nons = self.get_leaders_and_non_leaders() + leader_unit = zaza.model.get_unit_from_name(leader) + + # Wait until we are idle in the hopes clients are not running + # update-status hooks + logging.info("Wait till model is idle ...") + zaza.model.block_until_all_units_idle() + zaza.model.destroy_unit(self.application_name, leader) + + logging.info("Wait until all only 2 units ...") + zaza.model.block_until_unit_count(self.application, 2) + + logging.info("Wait until all units are cluster incomplete ...") + zaza.model.block_until_wl_status_info_starts_with( + self.application, "'cluster' incomplete") + + # Show status + logging.info(self.get_cluster_status()) + + logging.info( + "Removing old unit from cluster: {} " + .format(leader_unit.public_address)) + action = zaza.model.run_action( + nons[0], + "remove-instance", + action_params={ + "address": leader_unit.public_address, + "force": True}) + assert action.data.get("results") is not None, ( + "Remove instance action failed: No results: {}" + .format(action.data)) + + def test_801_add_unit(self): + """Add mysql-innodb-cluster node. + + We start with two node cluster in waiting, add one, back to a full + cluster of three. + """ + logging.info("Wait till model is idle ...") + zaza.model.block_until_all_units_idle() + + logging.info("Adding unit after removed unit ...") + zaza.model.add_unit(self.application_name) + + logging.info("Wait until 3 units ...") + zaza.model.block_until_unit_count(self.application, 3) + + logging.info("Wait for application states ...") + zaza.model.wait_for_application_states(states=self.states) + + def test_802_add_unit(self): + """Add another mysql-innodb-cluster node. + + We start with a three node full cluster, add another, up to a four node + cluster. + """ + logging.info("Wait till model is idle ...") + zaza.model.block_until_all_units_idle() + + logging.info("Adding unit after full cluster ...") + zaza.model.add_unit(self.application_name) + + logging.info("Wait until 4 units ...") + zaza.model.block_until_unit_count(self.application, 4) + + logging.info("Wait for application states ...") + zaza.model.wait_for_application_states(states=self.states) + + def test_803_remove_fourth(self): + """Remove mysql-innodb-cluster node. + + We start with a four node full cluster, remove one, down to a three + node full cluster. + """ + leader, nons = self.get_leaders_and_non_leaders() + non_leader_unit = zaza.model.get_unit_from_name(nons[0]) + + # Wait until we are idle in the hopes clients are not running + # update-status hooks + logging.info("Wait till model is idle ...") + zaza.model.block_until_all_units_idle() + + zaza.model.destroy_unit(self.application_name, nons[0]) + + logging.info("Scale in test: back down to three") + logging.info("Wait until 3 units ...") + zaza.model.block_until_unit_count(self.application, 3) + + logging.info("Wait for status ready ...") + zaza.model.wait_for_application_states(states=self.states) + + # Show status + logging.info(self.get_cluster_status()) + + logging.info( + "Removing old unit from cluster: {} " + .format(non_leader_unit.public_address)) + action = zaza.model.run_action( + leader, + "remove-instance", + action_params={ + "address": non_leader_unit.public_address, + "force": True}) + assert action.data.get("results") is not None, ( + "Remove instance action failed: No results: {}" + .format(action.data)) + + +class MySQLInnoDBClusterPartitionTest(MySQLBaseTest): + """MySQL partition handling.""" + + def test_850_force_quorum_using_partition_of(self): + """Force quorum using partition of instance with given address. + + After outage, cluster can end up without quorum. Force it. + """ + logging.info("Wait till model is idle ...") + zaza.model.block_until_all_units_idle() + + # Block all traffic across mysql instances: 0<-1, 1<-2 and 2<-0 + mysql_units = [unit for unit in zaza.model.get_units(self.application)] + no_of_units = len(mysql_units) + for index, unit in enumerate(mysql_units): + next_unit = mysql_units[(index+1) % no_of_units] + ip_address = next_unit.public_address + cmd = "sudo iptables -A INPUT -s {} -j DROP".format(ip_address) + zaza.model.async_run_on_unit(unit, cmd) + + logging.info( + "Wait till all {} units are in state 'blocked' ..." + .format(self.application)) + for unit in zaza.model.get_units(self.application): + zaza.model.block_until_unit_wl_status( + unit.entity_id, + 'blocked', + negate_match=True) + + logging.info("Wait till model is idle ...") + zaza.model.block_until_all_units_idle() + + logging.info("Execute force-quorum-using-partition-of action ...") + + # Select "quorum leader" unit + leader_unit = mysql_units[0] + action = zaza.model.run_action( + leader_unit.entity_id, + "force-quorum-using-partition-of", + action_params={ + "address": leader_unit.public_address, + 'i-really-mean-it': True + }) + + assert action.data.get("results") is not None, ( + "Force quorum using partition of action failed: {}" + .format(action.data)) + logging.debug( + "Results from running 'force-quorum' command ...\n{}".format( + action.data)) + + logging.info("Wait till model is idle ...") + try: + zaza.model.block_until_all_units_idle() + except zaza.model.UnitError: + self.resolve_update_status_errors() + zaza.model.block_until_all_units_idle() + + # Unblock all traffic across mysql instances + for unit in zaza.model.get_units(self.application): + cmd = "sudo iptables -F" + zaza.model.async_run_on_unit(unit, cmd) + + logging.info("Wait for application states ...") + for unit in zaza.model.get_units(self.application): + zaza.model.run_on_unit(unit.entity_id, "hooks/update-status") + test_config = lifecycle_utils.get_charm_config(fatal=False) + zaza.model.wait_for_application_states( + states=test_config.get("target_deploy_status", {})) + + +class MySQLRouterTests(test_utils.OpenStackBaseTest): + """MySQL Router Tests.""" + + @classmethod + def setUpClass(cls, application_name="keystone-mysql-router"): + """Run class setup for running mysql-router tests.""" + super().setUpClass(application_name=application_name) + cls.application = application_name + cls.services = ["mysqlrouter"] + # Config file affected by juju set config change + cls.conf_file = ( + "/var/lib/mysql/{}-mysql-router/mysqlrouter.conf" + .format(application_name)) + + def test_910_restart_on_config_change(self): + """Checking restart happens on config change. + + Change max connections and assert that change propagates to the correct + file and that services are restarted as a result + """ + # Expected default and alternate values + set_default = {"ttl": ".5"} + set_alternate = {"ttl": "7"} + + # Make config change, check for service restarts + logging.info("Setting TTL ...") + self.restart_on_changed( + self.conf_file, + set_default, + set_alternate, + {}, {}, + self.services) + logging.info("Passed restart on changed test.") diff --git a/zaza/openstack/charm_tests/mysql/utils.py b/zaza/openstack/charm_tests/mysql/utils.py new file mode 100644 index 0000000..75da5d9 --- /dev/null +++ b/zaza/openstack/charm_tests/mysql/utils.py @@ -0,0 +1,34 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module of functions for interfacing with the percona-cluster charm.""" + +import zaza.model as model + + +async def complete_cluster_series_upgrade(): + """Run the complete-cluster-series-upgrade action on the lead unit.""" + # Note that some models use mysql as the application name, and other's use + # percona-cluster. Try mysql first, and if it doesn't exist, then try + # percona-cluster instead. + try: + await model.async_run_action_on_leader( + 'mysql', + 'complete-cluster-series-upgrade', + action_params={}) + except KeyError: + await model.async_run_action_on_leader( + 'percona-cluster', + 'complete-cluster-series-upgrade', + action_params={}) diff --git a/zaza/openstack/charm_tests/neutron/setup.py b/zaza/openstack/charm_tests/neutron/setup.py index edacd22..a1d1dd4 100644 --- a/zaza/openstack/charm_tests/neutron/setup.py +++ b/zaza/openstack/charm_tests/neutron/setup.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 - # Copyright 2018 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,6 +14,9 @@ """Setup for Neutron deployments.""" +import functools +import logging + from zaza.openstack.configure import ( network, ) @@ -25,7 +26,8 @@ from zaza.openstack.utilities import ( juju as juju_utils, openstack as openstack_utils, ) -import zaza.model as model + +import zaza.charm_lifecycle.utils as lifecycle_utils # The overcloud network configuration settings are declared. @@ -57,12 +59,14 @@ DEFAULT_UNDERCLOUD_NETWORK_CONFIG = { } -def basic_overcloud_network(): +def basic_overcloud_network(limit_gws=None): """Run setup for neutron networking. Configure the following: The overcloud network using subnet pools + :param limit_gws: Limit the number of gateways that get a port attached + :type limit_gws: int """ cli_utils.setup_logging() @@ -74,19 +78,49 @@ def basic_overcloud_network(): network_config.update(DEFAULT_UNDERCLOUD_NETWORK_CONFIG) # Environment specific settings network_config.update(generic_utils.get_undercloud_env_vars()) - # Deployed model settings - if (model.get_application_config('neutron-api') - .get('enable-dvr').get('value')): - network_config.update({"dvr_enabled": True}) # Get keystone session keystone_session = openstack_utils.get_overcloud_keystone_session() - # Handle network for Openstack-on-Openstack scenarios - if juju_utils.get_provider_type() == "openstack": + # Get optional use_juju_wait for netw ork option + options = (lifecycle_utils + .get_charm_config(fatal=False) + .get('configure_options', {})) + use_juju_wait = options.get( + 'configure_gateway_ext_port_use_juju_wait', True) + + # Handle network for OpenStack-on-OpenStack scenarios + provider_type = juju_utils.get_provider_type() + if provider_type == "openstack": undercloud_ks_sess = openstack_utils.get_undercloud_keystone_session() network.setup_gateway_ext_port(network_config, - keystone_session=undercloud_ks_sess) + keystone_session=undercloud_ks_sess, + limit_gws=limit_gws, + use_juju_wait=use_juju_wait) + elif provider_type == "maas": + # NOTE(fnordahl): After validation of the MAAS+Netplan Open vSwitch + # integration support, we would most likely want to add multiple modes + # of operation with MAAS. + # + # Perform charm based OVS configuration + openstack_utils.configure_charmed_openstack_on_maas( + network_config, limit_gws=limit_gws) + else: + logging.warning('Unknown Juju provider type, "{}", will not perform' + ' charm network configuration.' + .format(provider_type)) # Confugre the overcloud network network.setup_sdn(network_config, keystone_session=keystone_session) + + +# Configure function to get one gateway with external network +overcloud_network_one_gw = functools.partial( + basic_overcloud_network, + limit_gws=1) + + +# Configure function to get two gateways with external network +overcloud_network_two_gws = functools.partial( + basic_overcloud_network, + limit_gws=2) diff --git a/zaza/openstack/charm_tests/neutron/tests.py b/zaza/openstack/charm_tests/neutron/tests.py index 8657e4b..54420e8 100644 --- a/zaza/openstack/charm_tests/neutron/tests.py +++ b/zaza/openstack/charm_tests/neutron/tests.py @@ -14,27 +14,315 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Encapsulating `neutron-openvswitch` testing.""" +"""Encapsulating testing of some `neutron-*` charms. + +`neutron-api`, `neutron-gateway` and `neutron-openvswitch` +""" +import copy import logging -import unittest +import tenacity + +from neutronclient.common import exceptions as neutronexceptions import zaza -import zaza.openstack.charm_tests.glance.setup as glance_setup import zaza.openstack.charm_tests.nova.utils as nova_utils import zaza.openstack.charm_tests.test_utils as test_utils import zaza.openstack.configure.guest as guest import zaza.openstack.utilities.openstack as openstack_utils -class NeutronApiTest(test_utils.OpenStackBaseTest): +class NeutronPluginApiSharedTests(test_utils.OpenStackBaseTest): + """Shared tests for Neutron Plugin API Charms.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running Neutron Openvswitch tests.""" + super(NeutronPluginApiSharedTests, cls).setUpClass() + + cls.current_os_release = openstack_utils.get_os_release() + cls.bionic_stein = openstack_utils.get_os_release('bionic_stein') + cls.trusty_mitaka = openstack_utils.get_os_release('trusty_mitaka') + + if cls.current_os_release >= cls.bionic_stein: + cls.pgrep_full = True + else: + cls.pgrep_full = False + + def test_211_ovs_use_veth(self): + """Verify proper handling of ovs-use-veth setting.""" + current_release = openstack_utils.get_os_release() + xenial_mitaka = openstack_utils.get_os_release('xenial_mitaka') + if current_release < xenial_mitaka: + logging.info( + "Skipping OVS use veth test. ovs_use_veth is always True on " + "Trusty.") + return + conf_file = "/etc/neutron/dhcp_agent.ini" + expected = {"DEFAULT": {"ovs_use_veth": ["False"]}} + test_config = zaza.charm_lifecycle.utils.get_charm_config(fatal=False) + states = test_config.get("target_deploy_status", {}) + alt_states = copy.deepcopy(states) + alt_states[self.application_name] = { + "workload-status": "blocked", + "workload-status-message": + "Mismatched existing and configured ovs-use-veth. See log."} + + if "neutron-openvswitch" in self.application_name: + logging.info("Turning on DHCP and metadata") + zaza.model.set_application_config( + self.application_name, + {"enable-local-dhcp-and-metadata": "True"}) + zaza.model.wait_for_application_states(states=states) + + logging.info("Check for expected default ovs-use-veth setting of " + "False") + zaza.model.block_until_oslo_config_entries_match( + self.application_name, + conf_file, + expected, + ) + logging.info("Setting conflicting ovs-use-veth to True") + zaza.model.set_application_config( + self.application_name, + {"ovs-use-veth": "True"}) + logging.info("Wait to go into a blocked workload status") + zaza.model.wait_for_application_states(states=alt_states) + # Check the value stayed the same + logging.info("Check that the value of ovs-use-veth setting " + "remained False") + zaza.model.block_until_oslo_config_entries_match( + self.application_name, + conf_file, + expected, + ) + logging.info("Setting ovs-use-veth to match existing.") + zaza.model.set_application_config( + self.application_name, + {"ovs-use-veth": "False"}) + logging.info("Wait to go into unit ready workload status") + zaza.model.wait_for_application_states(states=states) + + +class NeutronGatewayTest(NeutronPluginApiSharedTests): + """Test basic Neutron Gateway Charm functionality.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running Neutron Gateway tests.""" + super(NeutronGatewayTest, cls).setUpClass() + cls.services = cls._get_services() + + # set up clients + cls.neutron_client = ( + openstack_utils.get_neutron_session_client(cls.keystone_session)) + + _APP_NAME = 'neutron-gateway' + + def test_401_enable_qos(self): + """Check qos settings set via neutron-api charm.""" + if (self.current_os_release >= + openstack_utils.get_os_release('trusty_mitaka')): + logging.info('running qos check') + + with self.config_change( + {'enable-qos': 'False'}, + {'enable-qos': 'True'}, + application_name="neutron-api"): + + self._validate_openvswitch_agent_qos() + + @tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60), + reraise=True, stop=tenacity.stop_after_attempt(8)) + def _validate_openvswitch_agent_qos(self): + """Validate that the qos extension is enabled in the ovs agent.""" + # obtain the dhcp agent to identify the neutron-gateway host + dhcp_agent = self.neutron_client.list_agents( + binary='neutron-dhcp-agent')['agents'][0] + neutron_gw_host = dhcp_agent['host'] + logging.debug('neutron gw host: {}'.format(neutron_gw_host)) + + # check extensions on the ovs agent to validate qos + ovs_agent = self.neutron_client.list_agents( + binary='neutron-openvswitch-agent', + host=neutron_gw_host)['agents'][0] + + self.assertIn('qos', ovs_agent['configurations']['extensions']) + + def test_900_restart_on_config_change(self): + """Checking restart happens on config change. + + Change debug mode and assert that change propagates to the correct + file and that services are restarted as a result + """ + # Expected default and alternate values + current_value = zaza.model.get_application_config( + self._APP_NAME)['debug']['value'] + new_value = str(not bool(current_value)).title() + current_value = str(current_value).title() + + set_default = {'debug': current_value} + set_alternate = {'debug': new_value} + default_entry = {'DEFAULT': {'debug': [current_value]}} + alternate_entry = {'DEFAULT': {'debug': [new_value]}} + + # Config file affected by juju set config change + conf_file = '/etc/neutron/neutron.conf' + + # Make config change, check for service restarts + logging.info( + 'Setting verbose on {} {}'.format(self._APP_NAME, set_alternate)) + self.restart_on_changed( + conf_file, + set_default, + set_alternate, + default_entry, + alternate_entry, + self.services, + pgrep_full=self.pgrep_full) + + def test_910_pause_and_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + """ + with self.pause_resume( + self.services, + pgrep_full=self.pgrep_full): + logging.info("Testing pause resume") + + def test_920_change_aa_profile(self): + """Test changing the Apparmor profile mode.""" + services = ['neutron-openvswitch-agent', + 'neutron-dhcp-agent', + 'neutron-l3-agent', + 'neutron-metadata-agent', + 'neutron-metering-agent'] + + set_default = {'aa-profile-mode': 'disable'} + set_alternate = {'aa-profile-mode': 'complain'} + + mtime = zaza.model.get_unit_time( + self.lead_unit, + model_name=self.model_name) + logging.debug('Remote unit timestamp {}'.format(mtime)) + + with self.config_change(set_default, set_alternate): + for unit in zaza.model.get_units(self._APP_NAME, + model_name=self.model_name): + logging.info('Checking number of profiles in complain ' + 'mode in {}'.format(unit.entity_id)) + run = zaza.model.run_on_unit( + unit.entity_id, + 'aa-status --complaining', + model_name=self.model_name) + output = run['Stdout'] + self.assertTrue(int(output) >= len(services)) + + @classmethod + def _get_services(cls): + """ + Return the services expected in Neutron Gateway. + + :returns: A list of services + :rtype: list[str] + """ + services = ['neutron-dhcp-agent', + 'neutron-metadata-agent', + 'neutron-metering-agent', + 'neutron-openvswitch-agent'] + + trusty_icehouse = openstack_utils.get_os_release('trusty_icehouse') + xenial_newton = openstack_utils.get_os_release('xenial_newton') + bionic_train = openstack_utils.get_os_release('bionic_train') + + if cls.current_os_release <= trusty_icehouse: + services.append('neutron-vpn-agent') + if cls.current_os_release < xenial_newton: + services.append('neutron-lbaas-agent') + if xenial_newton <= cls.current_os_release < bionic_train: + services.append('neutron-lbaasv2-agent') + + return services + + +class NeutronCreateNetworkTest(test_utils.OpenStackBaseTest): + """Test creating a Neutron network through the API. + + This is broken out into a separate class as it can be useful as standalone + tests for Neutron plugin subordinate charms. + """ + + @classmethod + def setUpClass(cls): + """Run class setup for running Neutron Gateway tests.""" + super(NeutronCreateNetworkTest, cls).setUpClass() + cls.current_os_release = openstack_utils.get_os_release() + + # set up clients + cls.neutron_client = ( + openstack_utils.get_neutron_session_client(cls.keystone_session)) + cls.neutron_client.format = 'json' + + _TEST_NET_NAME = 'test_net' + + def test_400_create_network(self): + """Create a network, verify that it exists, and then delete it.""" + self._wait_for_neutron_ready() + self._assert_test_network_doesnt_exist() + self._create_test_network() + net_id = self._assert_test_network_exists_and_return_id() + self._delete_test_network(net_id) + self._assert_test_network_doesnt_exist() + + @classmethod + def _wait_for_neutron_ready(cls): + logging.info('Waiting for Neutron to become ready...') + zaza.model.wait_for_application_states() + for attempt in tenacity.Retrying( + wait=tenacity.wait_fixed(5), # seconds + stop=tenacity.stop_after_attempt(12), + reraise=True): + with attempt: + cls.neutron_client.list_networks() + + def _create_test_network(self): + logging.info('Creating neutron network...') + network = {'name': self._TEST_NET_NAME} + self.neutron_client.create_network({'network': network}) + + def _delete_test_network(self, net_id): + logging.info('Deleting neutron network...') + self.neutron_client.delete_network(net_id) + + def _assert_test_network_exists_and_return_id(self): + logging.debug('Confirming new neutron network...') + networks = self.neutron_client.list_networks(name=self._TEST_NET_NAME) + logging.debug('Networks: {}'.format(networks)) + net_len = len(networks['networks']) + assert net_len == 1, ( + "Expected 1 network, found {}".format(net_len)) + network = networks['networks'][0] + assert network['name'] == self._TEST_NET_NAME, \ + "network {} not found".format(self._TEST_NET_NAME) + return network['id'] + + def _assert_test_network_doesnt_exist(self): + networks = self.neutron_client.list_networks(name=self._TEST_NET_NAME) + net_count = len(networks['networks']) + assert net_count == 0, ( + "Expected zero networks, found {}".format(net_count)) + + +class NeutronApiTest(NeutronCreateNetworkTest): """Test basic Neutron API Charm functionality.""" def test_900_restart_on_config_change(self): """Checking restart happens on config change. - Change disk format and assert then change propagates to the correct + Change debug mode and assert that change propagates to the correct file and that services are restarted as a result """ # Expected default and alternate values @@ -86,25 +374,41 @@ class NeutronApiTest(test_utils.OpenStackBaseTest): class SecurityTest(test_utils.OpenStackBaseTest): - """Neutron APIsecurity tests tests.""" + """Neutron Security Tests.""" def test_security_checklist(self): """Verify expected state with security-checklist.""" - # Changes fixing the below expected failures will be made following - # this initial work to get validation in. There will be bugs targeted - # to each one and resolved independently where possible. - - expected_failures = [ - 'validate-enables-tls', - 'validate-uses-tls-for-keystone', - ] + expected_failures = [] expected_passes = [ 'validate-file-ownership', 'validate-file-permissions', - 'validate-uses-keystone', ] + expected_to_pass = True - for unit in zaza.model.get_units('neutron-api', + # override settings depending on application name so we can reuse + # the class for multiple charms + if self.application_name == 'neutron-api': + tls_checks = [ + 'validate-uses-tls-for-keystone', + ] + + expected_failures = [ + 'validate-enables-tls', # LP: #1851610 + ] + + expected_passes.append('validate-uses-keystone') + + if zaza.model.get_relation_id( + 'neutron-api', + 'vault', + remote_interface_name='certificates'): + expected_passes.extend(tls_checks) + else: + expected_failures.extend(tls_checks) + + expected_to_pass = False + + for unit in zaza.model.get_units(self.application_name, model_name=self.model_name): logging.info('Running `security-checklist` action' ' on unit {}'.format(unit.entity_id)) @@ -116,66 +420,278 @@ class SecurityTest(test_utils.OpenStackBaseTest): action_params={}), expected_passes, expected_failures, - expected_to_pass=False) + expected_to_pass=expected_to_pass) -class NeutronNetworkingTest(unittest.TestCase): - """Ensure that openstack instances have valid networking.""" +class NeutronOpenvSwitchTest(NeutronPluginApiSharedTests): + """Test basic Neutron Openvswitch Charm functionality.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running Neutron Openvswitch tests.""" + super(NeutronOpenvSwitchTest, cls).setUpClass() + + # set up client + cls.neutron_client = ( + openstack_utils.get_neutron_session_client(cls.keystone_session)) + + def test_101_neutron_sriov_config(self): + """Verify data in the sriov agent config file.""" + xenial_mitaka = openstack_utils.get_os_release('xenial_mitaka') + if self.current_os_release < xenial_mitaka: + logging.debug('Skipping test, sriov agent not supported on < ' + 'xenial/mitaka') + return + + zaza.model.set_application_config( + self.application_name, + {'enable-sriov': 'True'}) + + zaza.model.wait_for_agent_status() + zaza.model.wait_for_application_states() + + self._check_settings_in_config( + self.application_name, + 'sriov-device-mappings', + 'physical_device_mappings', + ['', 'physnet42:eth42'], + 'sriov_nic', + '/etc/neutron/plugins/ml2/sriov_agent.ini') + + # the CI environment does not expose an actual SR-IOV NIC to the + # functional tests. consequently the neutron-sriov agent will not + # run, and the charm will update its status as such. this will prevent + # the success of pause/resume test. + # + # disable sriov after validation of config file is complete. + logging.info('Disabling SR-IOV after verifying config file data...') + + zaza.model.set_application_config( + self.application_name, + {'enable-sriov': 'False'}) + + logging.info('Waiting for config-changes to complete...') + zaza.model.wait_for_agent_status() + zaza.model.wait_for_application_states() + + logging.debug('OK') + + def _check_settings_in_config(self, service, charm_key, + config_file_key, vpair, + section, conf_file): + + set_default = {charm_key: vpair[0]} + set_alternate = {charm_key: vpair[1]} + app_name = service + + expected = { + section: { + config_file_key: [str(vpair[1])], + }, + } + + with self.config_change(set_default, + set_alternate, + application_name=app_name): + zaza.model.block_until_oslo_config_entries_match( + self.application_name, + conf_file, + expected, + ) + logging.debug('OK') + + def test_201_l2pop_propagation(self): + """Verify that l2pop setting propagates to neutron-ovs.""" + self._check_settings_in_config( + 'neutron-api', + 'l2-population', + 'l2_population', + [False, True], + 'agent', + '/etc/neutron/plugins/ml2/openvswitch_agent.ini') + + def test_202_nettype_propagation(self): + """Verify that nettype setting propagates to neutron-ovs.""" + self._check_settings_in_config( + 'neutron-api', + 'overlay-network-type', + 'tunnel_types', + ['vxlan', 'gre'], + 'agent', + '/etc/neutron/plugins/ml2/openvswitch_agent.ini') + + def test_301_secgroup_propagation_local_override(self): + """Verify disable-security-groups overrides what neutron-api says.""" + if self.current_os_release >= self.trusty_mitaka: + conf_file = "/etc/neutron/plugins/ml2/openvswitch_agent.ini" + else: + conf_file = "/etc/neutron/plugins/ml2/ml2_conf.ini" + + with self.config_change( + {'neutron-security-groups': False}, + {'neutron-security-groups': True}, + application_name='neutron-api'): + with self.config_change( + {'disable-security-groups': False}, + {'disable-security-groups': True}): + zaza.model.block_until_oslo_config_entries_match( + self.application_name, + conf_file, + {'securitygroup': {'enable_security_group': ['False']}}) + + def test_401_restart_on_config_change(self): + """Verify that the specified services are restarted. + + When the config is changed we need to make sure that the services are + restarted. + """ + self.restart_on_changed( + '/etc/neutron/neutron.conf', + {'debug': False}, + {'debug': True}, + {'DEFAULT': {'debug': ['False']}}, + {'DEFAULT': {'debug': ['True']}}, + ['neutron-openvswitch-agent'], + pgrep_full=self.pgrep_full) + + def test_501_enable_qos(self): + """Check qos settings set via neutron-api charm.""" + if self.current_os_release < self.trusty_mitaka: + logging.debug('Skipping test') + return + + set_default = {'enable-qos': False} + set_alternate = {'enable-qos': True} + app_name = 'neutron-api' + + conf_file = '/etc/neutron/plugins/ml2/openvswitch_agent.ini' + expected = { + 'agent': { + 'extensions': ['qos'], + }, + } + + with self.config_change(set_default, + set_alternate, + application_name=app_name): + zaza.model.block_until_oslo_config_entries_match( + self.application_name, + conf_file, + expected, + ) + logging.debug('OK') + + def test_901_pause_and_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + """ + with self.pause_resume(['neutron-openvswitch-agent'], + pgrep_full=self.pgrep_full): + logging.info('Testing pause resume') + + +class NeutronBridgePortMappingTest(NeutronPluginApiSharedTests): + """Test correct handling of network-bridge-port mapping functionality.""" + + def test_600_conflict_data_ext_ports(self): + """Verify proper handling of conflict between data-port and ext-port. + + Configuring ext-port and data-port at the same time should make the + charm to enter "blocked" state. After unsetting ext-port charm should + be active again. + """ + if self.application_name not in ["neutron-gateway", + "neutron-openvswitch"]: + logging.debug("Skipping test, charm under test is not " + "neutron-gateway or neutron-openvswitch") + return + + current_data_port = zaza.model.get_application_config( + self.application_name).get("data-port").get("value", "") + current_ext_port = zaza.model.get_application_config( + self.application_name).get("ext-port").get("value", "") + logging.debug("Current data-port: '{}'".format(current_data_port)) + logging.debug("Current data-port: '{}'".format(current_ext_port)) + + test_config = zaza.charm_lifecycle.utils.get_charm_config( + fatal=False) + current_state = test_config.get("target_deploy_status", {}) + blocked_state = copy.deepcopy(current_state) + blocked_state[self.application_name] = { + "workload-status": "blocked", + "workload-status-message": + "ext-port set when data-port set: see config.yaml"} + + logging.info("Setting conflicting ext-port and data-port options") + zaza.model.set_application_config( + self.application_name, {"data-port": "br-phynet43:eth43", + "ext-port": "br-phynet43:eth43"}) + zaza.model.wait_for_application_states(states=blocked_state) + + # unset ext-port and wait for app state to return to active + logging.info("Unsetting conflicting ext-port option") + zaza.model.set_application_config( + self.application_name, {"ext-port": ""}) + zaza.model.wait_for_application_states(states=current_state) + + # restore original config + zaza.model.set_application_config( + self.application_name, {'data-port': current_data_port, + 'ext-port': current_ext_port}) + zaza.model.wait_for_application_states(states=current_state) + logging.info('OK') + + +class NeutronOvsVsctlTest(NeutronPluginApiSharedTests): + """Test 'ovs-vsctl'-related functionality on Neutron charms.""" + + def test_800_ovs_bridges_are_managed_by_us(self): + """Checking OVS bridges' external-id. + + OVS bridges created by us should be marked as managed by us in their + external-id. See + http://docs.openvswitch.org/en/latest/topics/integration/ + """ + for unit in zaza.model.get_units(self.application_name, + model_name=self.model_name): + for bridge_name in ('br-int', 'br-ex'): + logging.info( + 'Checking that the bridge {}:{}'.format( + unit.name, bridge_name + ) + ' is marked as managed by us' + ) + expected_external_id = 'charm-{}=managed'.format( + self.application_name) + actual_external_id = zaza.model.run_on_unit( + unit.entity_id, + 'ovs-vsctl br-get-external-id {}'.format(bridge_name), + model_name=self.model_name + )['Stdout'].strip() + self.assertEqual(actual_external_id, expected_external_id) + + +class NeutronNetworkingBase(test_utils.OpenStackBaseTest): + """Base for checking openstack instances have valid networking.""" RESOURCE_PREFIX = 'zaza-neutrontests' @classmethod def setUpClass(cls): """Run class setup for running Neutron API Networking tests.""" - cls.keystone_session = ( - openstack_utils.get_overcloud_keystone_session()) - cls.nova_client = ( - openstack_utils.get_nova_session_client(cls.keystone_session)) - - @classmethod - def tearDown(cls): - """Remove test resources.""" - logging.info('Running teardown') - for server in cls.nova_client.servers.list(): - if server.name.startswith(cls.RESOURCE_PREFIX): - openstack_utils.delete_resource( - cls.nova_client.servers, - server.id, - msg="server") - - def test_instances_have_networking(self): - """Validate North/South and East/West networking.""" - guest.launch_instance( - glance_setup.LTS_IMAGE_NAME, - vm_name='{}-ins-1'.format(self.RESOURCE_PREFIX)) - guest.launch_instance( - glance_setup.LTS_IMAGE_NAME, - vm_name='{}-ins-2'.format(self.RESOURCE_PREFIX)) - - instance_1 = self.nova_client.servers.find( - name='{}-ins-1'.format(self.RESOURCE_PREFIX)) - - instance_2 = self.nova_client.servers.find( - name='{}-ins-2'.format(self.RESOURCE_PREFIX)) - - def verify(stdin, stdout, stderr): - """Validate that the SSH command exited 0.""" - self.assertEqual(stdout.channel.recv_exit_status(), 0) - - # Verify network from 1 to 2 - self.validate_instance_can_reach_other(instance_1, instance_2, verify) - - # Verify network from 2 to 1 - self.validate_instance_can_reach_other(instance_2, instance_1, verify) - - # Validate tenant to external network routing - self.validate_instance_can_reach_router(instance_1, verify) - self.validate_instance_can_reach_router(instance_2, verify) + super(NeutronNetworkingBase, cls).setUpClass( + application_name='neutron-api') + cls.neutron_client = ( + openstack_utils.get_neutron_session_client(cls.keystone_session)) + @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60), + reraise=True, stop=tenacity.stop_after_attempt(8)) def validate_instance_can_reach_other(self, instance_1, instance_2, - verify): + verify, + mtu=None): """ Validate that an instance can reach a fixed and floating of another. @@ -184,6 +700,12 @@ class NeutronNetworkingTest(unittest.TestCase): :param instance_2: The instance to check networking from :type instance_2: nova_client.Server + + :param verify: callback to verify result + :type verify: callable + + :param mtu: Check that we can send non-fragmented packets of given size + :type mtu: Optional[int] """ floating_1 = floating_ips_from_instance(instance_1)[0] floating_2 = floating_ips_from_instance(instance_2)[0] @@ -193,17 +715,30 @@ class NeutronNetworkingTest(unittest.TestCase): password = guest.boot_tests['bionic'].get('password') privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME) - openstack_utils.ssh_command( - username, floating_1, 'instance-1', - 'ping -c 1 {}'.format(address_2), - password=password, privkey=privkey, verify=verify) + cmds = [ + 'ping -c 1', + ] + if mtu: + # the on-wire packet will be 28 bytes larger than the value + # provided to ping(8) -s parameter + packetsize = mtu - 28 + cmds.append( + 'ping -M do -s {} -c 1'.format(packetsize)) - openstack_utils.ssh_command( - username, floating_1, 'instance-1', - 'ping -c 1 {}'.format(floating_2), - password=password, privkey=privkey, verify=verify) + for cmd in cmds: + openstack_utils.ssh_command( + username, floating_1, 'instance-1', + '{} {}'.format(cmd, address_2), + password=password, privkey=privkey, verify=verify) - def validate_instance_can_reach_router(self, instance, verify): + openstack_utils.ssh_command( + username, floating_1, 'instance-1', + '{} {}'.format(cmd, floating_2), + password=password, privkey=privkey, verify=verify) + + @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60), + reraise=True, stop=tenacity.stop_after_attempt(8)) + def validate_instance_can_reach_router(self, instance, verify, mtu=None): """ Validate that an instance can reach it's primary gateway. @@ -214,6 +749,12 @@ class NeutronNetworkingTest(unittest.TestCase): :param instance: The instance to check networking from :type instance: nova_client.Server + + :param verify: callback to verify result + :type verify: callable + + :param mtu: Check that we can send non-fragmented packets of given size + :type mtu: Optional[int] """ address = floating_ips_from_instance(instance)[0] @@ -221,10 +762,121 @@ class NeutronNetworkingTest(unittest.TestCase): password = guest.boot_tests['bionic'].get('password') privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME) - openstack_utils.ssh_command( - username, address, 'instance', 'ping -c 1 192.168.0.1', - password=password, privkey=privkey, verify=verify) - pass + cmds = [ + 'ping -c 1', + ] + if mtu: + # the on-wire packet will be 28 bytes larger than the value + # provided to ping(8) -s parameter + packetsize = mtu - 28 + cmds.append( + 'ping -M do -s {} -c 1'.format(packetsize)) + + for cmd in cmds: + openstack_utils.ssh_command( + username, address, 'instance', '{} 192.168.0.1'.format(cmd), + password=password, privkey=privkey, verify=verify) + + @tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60), + reraise=True, stop=tenacity.stop_after_attempt(8), + retry=tenacity.retry_if_exception_type(AssertionError)) + def check_server_state(self, nova_client, state, server_id=None, + server_name=None): + """Wait for server to reach desired state. + + :param nova_client: Nova client to use when checking status + :type nova_client: nova client + :param state: Target state for server + :type state: str + :param server_id: UUID of server to check + :type server_id: str + :param server_name: Name of server to check + :type server_name: str + :raises: AssertionError + """ + if server_name: + server_id = nova_client.servers.find(name=server_name).id + server = nova_client.servers.find(id=server_id) + assert server.status == state + + @tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60), + reraise=True, stop=tenacity.stop_after_attempt(8), + retry=tenacity.retry_if_exception_type(AssertionError)) + def check_neutron_agent_up(self, neutron_client, host_name): + """Wait for agents to come up. + + :param neutron_client: Neutron client to use when checking status + :type neutron_client: neutron client + :param host_name: The name of the host whose agents need checking + :type host_name: str + :raises: AssertionError + """ + for agent in neutron_client.list_agents()['agents']: + if agent['host'] == host_name: + assert agent['admin_state_up'] + assert agent['alive'] + + def effective_network_mtu(self, network_name): + """Retrieve effective MTU for a network. + + If the `instance-mtu` configuration option is set to a value lower than + the network MTU this method will return the value of that. Otherwise + Neutron's value for MTU on a network will be returned. + + :param network_name: Name of network to query + :type network_name: str + :returns: MTU for network + :rtype: int + """ + cfg_instance_mtu = None + for app in ('neutron-gateway', 'neutron-openvswitch'): + try: + cfg = zaza.model.get_application_config(app) + cfg_instance_mtu = int(cfg['instance-mtu']['value']) + break + except KeyError: + pass + + networks = self.neutron_client.show_network('', name=network_name) + network_mtu = int(next(iter(networks['networks']))['mtu']) + + if cfg_instance_mtu and cfg_instance_mtu < network_mtu: + logging.info('Using MTU from application "{}" config: {}' + .format(app, cfg_instance_mtu)) + return cfg_instance_mtu + else: + logging.info('Using MTU from network "{}": {}' + .format(network_name, network_mtu)) + return network_mtu + + def check_connectivity(self, instance_1, instance_2): + """Run North/South and East/West connectivity tests.""" + def verify(stdin, stdout, stderr): + """Validate that the SSH command exited 0.""" + self.assertEqual(stdout.channel.recv_exit_status(), 0) + + try: + mtu_1 = self.effective_network_mtu( + network_name_from_instance(instance_1)) + mtu_2 = self.effective_network_mtu( + network_name_from_instance(instance_2)) + mtu_min = min(mtu_1, mtu_2) + except neutronexceptions.NotFound: + # Older versions of OpenStack cannot look up network by name, just + # skip the check if that is the case. + mtu_1 = mtu_2 = mtu_min = None + + # Verify network from 1 to 2 + self.validate_instance_can_reach_other( + instance_1, instance_2, verify, mtu_min) + + # Verify network from 2 to 1 + self.validate_instance_can_reach_other( + instance_2, instance_1, verify, mtu_min) + + # Validate tenant to external network routing + self.validate_instance_can_reach_router(instance_1, verify, mtu_1) + self.validate_instance_can_reach_router(instance_2, verify, mtu_2) def floating_ips_from_instance(instance): @@ -253,6 +905,17 @@ def fixed_ips_from_instance(instance): return ips_from_instance(instance, 'fixed') +def network_name_from_instance(instance): + """Retrieve name of primary network the instance is attached to. + + :param instance: The instance to fetch name of network from. + :type instance: nova_client.Server + :returns: Name of primary network the instance is attached to. + :rtype: str + """ + return next(iter(instance.addresses)) + + def ips_from_instance(instance, ip_type): """ Retrieve IPs of a certain type from an instance. @@ -270,5 +933,119 @@ def ips_from_instance(instance, ip_type): "Only 'floating' and 'fixed' are valid IP types to search for" ) return list([ - ip['addr'] for ip in instance.addresses['private'] + ip['addr'] for ip in instance.addresses[ + network_name_from_instance(instance)] if ip['OS-EXT-IPS:type'] == ip_type]) + + +class NeutronNetworkingTest(NeutronNetworkingBase): + """Ensure that openstack instances have valid networking.""" + + def test_instances_have_networking(self): + """Validate North/South and East/West networking. + + Tear down can optionally be disabled by setting the module path + + class name + run_tearDown key under the `tests_options` key in + tests.yaml. + + Abbreviated example: + ...charm_tests.neutron.tests.NeutronNetworkingTest.run_tearDown: false + """ + instance_1, instance_2 = self.retrieve_guests() + if not all([instance_1, instance_2]): + self.launch_guests() + instance_1, instance_2 = self.retrieve_guests() + self.check_connectivity(instance_1, instance_2) + self.run_resource_cleanup = self.get_my_tests_options( + 'run_resource_cleanup', True) + + +class NeutronNetworkingVRRPTests(NeutronNetworkingBase): + """Check networking when gateways are restarted.""" + + def test_gateway_failure(self): + """Validate networking in the case of a gateway failure.""" + instance_1, instance_2 = self.retrieve_guests() + if not all([instance_1, instance_2]): + self.launch_guests() + instance_1, instance_2 = self.retrieve_guests() + self.check_connectivity(instance_1, instance_2) + + routers = self.neutron_client.list_routers( + name='provider-router')['routers'] + assert len(routers) == 1, "Unexpected router count {}".format( + len(routers)) + provider_router = routers[0] + l3_agents = self.neutron_client.list_l3_agent_hosting_routers( + router=provider_router['id'])['agents'] + logging.info( + 'Checking there are multiple L3 agents running tenant router') + assert len(l3_agents) == 2, "Unexpected l3 agent count {}".format( + len(l3_agents)) + uc_ks_session = openstack_utils.get_undercloud_keystone_session() + uc_nova_client = openstack_utils.get_nova_session_client(uc_ks_session) + uc_neutron_client = openstack_utils.get_neutron_session_client( + uc_ks_session) + for agent in l3_agents: + gateway_hostname = agent['host'] + gateway_server = uc_nova_client.servers.find(name=gateway_hostname) + logging.info("Shutting down {}".format(gateway_hostname)) + gateway_server.stop() + self.check_server_state( + uc_nova_client, + 'SHUTOFF', + server_name=gateway_hostname) + self.check_connectivity(instance_1, instance_2) + gateway_server.start() + self.check_server_state( + uc_nova_client, + 'ACTIVE', + server_name=gateway_hostname) + self.check_neutron_agent_up( + uc_neutron_client, + gateway_hostname) + self.check_connectivity(instance_1, instance_2) + + +class NeutronOVSDeferredRestartTest(test_utils.BaseDeferredRestartTest): + """Deferred restart tests.""" + + @classmethod + def setUpClass(cls): + """Run setup for deferred restart tests.""" + super().setUpClass(application_name='neutron-openvswitch') + + def run_tests(self): + """Run deferred restart tests.""" + # Trigger a config change which triggers a deferred hook. + self.run_charm_change_hook_test('config-changed') + + # Trigger a package change which requires a restart + self.run_package_change_test( + 'openvswitch-switch', + 'openvswitch-switch') + + +class NeutronGatewayDeferredRestartTest(test_utils.BaseDeferredRestartTest): + """Deferred restart tests.""" + + @classmethod + def setUpClass(cls): + """Run setup for deferred restart tests.""" + super().setUpClass(application_name='neutron-gateway') + + def run_tests(self): + """Run deferred restart tests.""" + # Trigger a config change which requires a restart + self.run_charm_change_restart_test( + 'neutron-l3-agent', + '/etc/neutron/neutron.conf') + + # Trigger a package change which requires a restart + self.run_package_change_test( + 'openvswitch-switch', + 'openvswitch-switch') + + def check_clear_hooks(self): + """Gateway does not defer hooks so noop.""" + return diff --git a/zaza/openstack/charm_tests/neutron_arista/__init__.py b/zaza/openstack/charm_tests/neutron_arista/__init__.py new file mode 100644 index 0000000..c0eae4e --- /dev/null +++ b/zaza/openstack/charm_tests/neutron_arista/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing neutron-api-plugin-arista.""" diff --git a/zaza/openstack/charm_tests/neutron_arista/setup.py b/zaza/openstack/charm_tests/neutron_arista/setup.py new file mode 100644 index 0000000..f439abd --- /dev/null +++ b/zaza/openstack/charm_tests/neutron_arista/setup.py @@ -0,0 +1,85 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Code for setting up neutron-api-plugin-arista.""" + +import logging +import os +import tenacity +import zaza +import zaza.openstack.charm_tests.neutron_arista.utils as arista_utils +import zaza.openstack.utilities.openstack as openstack_utils + + +def download_arista_image(): + """Download arista-cvx-virt-test.qcow2 from a web server. + + The download will happen only if the env var TEST_ARISTA_IMAGE_REMOTE has + been set, so you don't have to set it if you already have the image + locally. + + If the env var TEST_ARISTA_IMAGE_LOCAL isn't set, it will be set to + `/tmp/arista-cvx-virt-test.qcow2`. This is where the image will be + downloaded to if TEST_ARISTA_IMAGE_REMOTE has been set. + """ + try: + os.environ['TEST_ARISTA_IMAGE_LOCAL'] + except KeyError: + os.environ['TEST_ARISTA_IMAGE_LOCAL'] = '' + if not os.environ['TEST_ARISTA_IMAGE_LOCAL']: + os.environ['TEST_ARISTA_IMAGE_LOCAL'] \ + = '/tmp/arista-cvx-virt-test.qcow2' + + try: + if os.environ['TEST_ARISTA_IMAGE_REMOTE']: + logging.info('Downloading Arista image from {}' + .format(os.environ['TEST_ARISTA_IMAGE_REMOTE'])) + + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(3), + reraise=True): + with attempt: + openstack_utils.download_image( + os.environ['TEST_ARISTA_IMAGE_REMOTE'], + os.environ['TEST_ARISTA_IMAGE_LOCAL']) + + except KeyError: + # TEST_ARISTA_IMAGE_REMOTE isn't set, which means the image is already + # available at TEST_ARISTA_IMAGE_LOCAL + pass + + logging.info('Arista image can be found at {}' + .format(os.environ['TEST_ARISTA_IMAGE_LOCAL'])) + + +def test_fixture(): + """Pass arista-virt-test-fixture's IP address to Neutron.""" + fixture_ip_addr = arista_utils.fixture_ip_addr() + logging.info( + "{}'s IP address is '{}'. Passing it to {}..." + .format(arista_utils.FIXTURE_APP_NAME, fixture_ip_addr, + arista_utils.PLUGIN_APP_NAME)) + zaza.model.set_application_config(arista_utils.PLUGIN_APP_NAME, + {'eapi-host': fixture_ip_addr}) + + logging.info('Waiting for {} to become ready...'.format( + arista_utils.PLUGIN_APP_NAME)) + zaza.model.wait_for_agent_status() + zaza.model.wait_for_application_states() + for attempt in tenacity.Retrying( + wait=tenacity.wait_fixed(10), # seconds + stop=tenacity.stop_after_attempt(30), + reraise=True): + with attempt: + arista_utils.query_fixture_networks(fixture_ip_addr) diff --git a/zaza/openstack/charm_tests/neutron_arista/tests.py b/zaza/openstack/charm_tests/neutron_arista/tests.py new file mode 100644 index 0000000..f9eb2fb --- /dev/null +++ b/zaza/openstack/charm_tests/neutron_arista/tests.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulating `neutron-api-plugin-arista` testing.""" + +import logging +import tenacity +import zaza.openstack.charm_tests.neutron.tests as neutron_tests +import zaza.openstack.charm_tests.neutron_arista.utils as arista_utils + + +class NeutronCreateAristaNetworkTest(neutron_tests.NeutronCreateNetworkTest): + """Test creating an Arista Neutron network through the API.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running Neutron Arista tests.""" + super(NeutronCreateAristaNetworkTest, cls).setUpClass() + cls._wait_for_neutron_ready() + + def _assert_test_network_exists_and_return_id(self): + logging.info('Checking that the test network exists on the Arista ' + 'test fixture...') + + # Sometimes the API call from Neutron to Arista fails and Neutron + # retries a couple of seconds later, which is why the newly created + # test network may not be immediately visible on Arista's API. + # NOTE(lourot): I experienced a run where it took 53 seconds. + for attempt in tenacity.Retrying( + wait=tenacity.wait_fixed(10), # seconds + stop=tenacity.stop_after_attempt(12), + reraise=True): + with attempt: + actual_network_names = arista_utils.query_fixture_networks( + arista_utils.fixture_ip_addr()) + self.assertEqual(actual_network_names, [self._TEST_NET_NAME]) + + return super(NeutronCreateAristaNetworkTest, + self)._assert_test_network_exists_and_return_id() + + def _assert_test_network_doesnt_exist(self): + logging.info("Checking that the test network doesn't exist on the " + "Arista test fixture...") + + for attempt in tenacity.Retrying( + wait=tenacity.wait_fixed(10), # seconds + stop=tenacity.stop_after_attempt(12), + reraise=True): + with attempt: + actual_network_names = arista_utils.query_fixture_networks( + arista_utils.fixture_ip_addr()) + self.assertEqual(actual_network_names, []) + + super(NeutronCreateAristaNetworkTest, + self)._assert_test_network_doesnt_exist() diff --git a/zaza/openstack/charm_tests/neutron_arista/utils.py b/zaza/openstack/charm_tests/neutron_arista/utils.py new file mode 100644 index 0000000..6fb77b3 --- /dev/null +++ b/zaza/openstack/charm_tests/neutron_arista/utils.py @@ -0,0 +1,68 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Common Arista-related utils.""" + +import json +import requests +import urllib3 +import zaza + +FIXTURE_APP_NAME = 'arista-virt-test-fixture' +PLUGIN_APP_NAME = 'neutron-api-plugin-arista' + + +def fixture_ip_addr(): + """Return the public IP address of the Arista test fixture.""" + return zaza.model.get_units(FIXTURE_APP_NAME)[0].public_address + + +_FIXTURE_LOGIN = 'admin' +_FIXTURE_PASSWORD = 'password123' + + +def query_fixture_networks(ip_addr): + """Query the Arista test fixture's list of networks.""" + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + + session = requests.Session() + session.headers['Content-Type'] = 'application/json' + session.headers['Accept'] = 'application/json' + session.verify = False + session.auth = (_FIXTURE_LOGIN, _FIXTURE_PASSWORD) + + data = { + 'id': 'Zaza {} tests'.format(PLUGIN_APP_NAME), + 'method': 'runCmds', + 'jsonrpc': '2.0', + 'params': { + 'timestamps': False, + 'format': 'json', + 'version': 1, + 'cmds': ['show openstack networks'] + } + } + + response = session.post( + 'https://{}/command-api/'.format(ip_addr), + data=json.dumps(data), + timeout=10 # seconds + ) + + result = [] + for region in response.json()['result'][0]['regions'].values(): + for tenant in region['tenants'].values(): + for network in tenant['tenantNetworks'].values(): + result.append(network['networkName']) + return result diff --git a/zaza/openstack/charm_tests/nova/setup.py b/zaza/openstack/charm_tests/nova/setup.py index c556c97..7f8ee53 100644 --- a/zaza/openstack/charm_tests/nova/setup.py +++ b/zaza/openstack/charm_tests/nova/setup.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Code for configureing nova.""" +"""Code for configuring nova.""" + +import tenacity import zaza.openstack.utilities.openstack as openstack_utils from zaza.openstack.utilities import ( @@ -21,6 +23,9 @@ from zaza.openstack.utilities import ( import zaza.openstack.charm_tests.nova.utils as nova_utils +@tenacity.retry(stop=tenacity.stop_after_attempt(3), + wait=tenacity.wait_exponential( + multiplier=1, min=2, max=10)) def create_flavors(nova_client=None): """Create basic flavors. @@ -43,6 +48,9 @@ def create_flavors(nova_client=None): flavorid=nova_utils.FLAVORS[flavor]['flavorid']) +@tenacity.retry(stop=tenacity.stop_after_attempt(3), + wait=tenacity.wait_exponential( + multiplier=1, min=2, max=10)) def manage_ssh_key(nova_client=None): """Create basic flavors. diff --git a/zaza/openstack/charm_tests/nova/tests.py b/zaza/openstack/charm_tests/nova/tests.py index f750b5a..66ca957 100644 --- a/zaza/openstack/charm_tests/nova/tests.py +++ b/zaza/openstack/charm_tests/nova/tests.py @@ -16,13 +16,17 @@ """Encapsulate nova testing.""" +import json import logging import unittest +from configparser import ConfigParser +from time import sleep import zaza.model -import zaza.openstack.charm_tests.test_utils as test_utils import zaza.openstack.charm_tests.glance.setup as glance_setup +import zaza.openstack.charm_tests.test_utils as test_utils import zaza.openstack.configure.guest +import zaza.openstack.utilities.openstack as openstack_utils class BaseGuestCreateTest(unittest.TestCase): @@ -36,27 +40,294 @@ class BaseGuestCreateTest(unittest.TestCase): zaza.openstack.configure.guest.launch_instance(instance_key) -class CirrosGuestCreateTest(BaseGuestCreateTest): +class CirrosGuestCreateTest(test_utils.OpenStackBaseTest): """Tests to launch a cirros image.""" def test_launch_small_instance(self): """Launch a cirros instance and test connectivity.""" - zaza.openstack.configure.guest.launch_instance( - glance_setup.CIRROS_IMAGE_NAME) + self.RESOURCE_PREFIX = 'zaza-nova' + self.launch_guest( + 'cirros', instance_key=glance_setup.CIRROS_IMAGE_NAME) + + def tearDown(self): + """Cleanup of VM guests.""" + self.resource_cleanup() -class LTSGuestCreateTest(BaseGuestCreateTest): +class LTSGuestCreateTest(test_utils.OpenStackBaseTest): """Tests to launch a LTS image.""" def test_launch_small_instance(self): """Launch a Bionic instance and test connectivity.""" - zaza.openstack.configure.guest.launch_instance( - glance_setup.LTS_IMAGE_NAME) + self.RESOURCE_PREFIX = 'zaza-nova' + self.launch_guest( + 'ubuntu', instance_key=glance_setup.LTS_IMAGE_NAME) + + def tearDown(self): + """Cleanup of VM guests.""" + self.resource_cleanup() -class NovaCompute(test_utils.OpenStackBaseTest): +class LTSGuestCreateVolumeBackedTest(test_utils.OpenStackBaseTest): + """Tests to launch a LTS image.""" + + def test_launch_small_instance(self): + """Launch a Bionic instance and test connectivity.""" + self.RESOURCE_PREFIX = 'zaza-nova' + self.launch_guest( + 'volume-backed-ubuntu', instance_key=glance_setup.LTS_IMAGE_NAME, + use_boot_volume=True) + + def tearDown(self): + """Cleanup of VM guests.""" + self.resource_cleanup() + + +class NovaCommonTests(test_utils.OpenStackBaseTest): + """nova-compute and nova-cloud-controller common tests.""" + + XENIAL_MITAKA = openstack_utils.get_os_release('xenial_mitaka') + XENIAL_OCATA = openstack_utils.get_os_release('xenial_ocata') + XENIAL_QUEENS = openstack_utils.get_os_release('xenial_queens') + BIONIC_QUEENS = openstack_utils.get_os_release('bionic_queens') + BIONIC_ROCKY = openstack_utils.get_os_release('bionic_rocky') + BIONIC_TRAIN = openstack_utils.get_os_release('bionic_train') + + @classmethod + def setUpClass(cls): + """Run class setup for running nova-cloud-controller tests.""" + super(NovaCommonTests, cls).setUpClass() + cls.current_release = openstack_utils.get_os_release() + + def _test_pci_alias_config(self, app_name, service_list): + logging.info('Checking pci aliases in nova config...') + + # Expected default and alternate values + current_value = zaza.model.get_application_config( + app_name)['pci-alias'] + try: + current_value = current_value['value'] + except KeyError: + current_value = None + new_value = '[{}, {}]'.format( + json.dumps({ + 'name': 'IntelNIC', + 'capability_type': 'pci', + 'product_id': '1111', + 'vendor_id': '8086', + 'device_type': 'type-PF' + }, sort_keys=True), + json.dumps({ + 'name': ' Cirrus Logic ', + 'capability_type': 'pci', + 'product_id': '0ff2', + 'vendor_id': '10de', + 'device_type': 'type-PCI' + }, sort_keys=True)) + + set_default = {'pci-alias': current_value} + set_alternate = {'pci-alias': new_value} + + expected_conf_section = 'pci' + expected_conf_key = 'alias' + + default_entry = {expected_conf_section: {}} + alternate_entry = {expected_conf_section: { + expected_conf_key: [ + ('{"capability_type": "pci", "device_type": "type-PF", ' + '"name": "IntelNIC", "product_id": "1111", ' + '"vendor_id": "8086"}'), + ('{"capability_type": "pci", "device_type": "type-PCI", ' + '"name": " Cirrus Logic ", "product_id": "0ff2", ' + '"vendor_id": "10de"}')]}} + + # Config file affected by juju set config change + conf_file = '/etc/nova/nova.conf' + + # Make config change, check for service restarts + logging.info( + 'Setting config on {} to {}'.format(app_name, set_alternate)) + self.restart_on_changed( + conf_file, + set_default, + set_alternate, + default_entry, + alternate_entry, + service_list) + + +class CloudActions(test_utils.OpenStackBaseTest): + """Test actions from actions/cloud.py.""" + + def fetch_nova_service_hostname(self, unit_name): + """ + Fetch hostname used to register with nova-cloud-controller. + + When nova-compute registers with nova-cloud-controller it uses either + config variable from '/etc/nova/nova.conf` or host's hostname to + identify itself. We need to fetch this value directly from the unit, + otherwise it's not possible to correlate entries from + `nova service-list` with nova-compute units. + + :param unit_name: nova-compute unit name. + :return: hostname used when registering to cloud-controller + """ + nova_cfg = ConfigParser() + + result = zaza.model.run_on_unit(unit_name, + 'cat /etc/nova/nova.conf') + nova_cfg.read_string(result['Stdout']) + + try: + nova_service_name = nova_cfg['DEFAULT']['host'] + except KeyError: + # Fallback to hostname if 'host' variable is not present in the + # config + result = zaza.model.run_on_unit(unit_name, 'hostname') + nova_service_name = result['Stdout'].rstrip('\n') + + if not nova_service_name: + self.fail("Failed to fetch nova service name from" + " nova-compute unit.") + return nova_service_name + + def test_940_enable_disable_actions(self): + """Test disable/enable actions on nova-compute units.""" + nova_units = zaza.model.get_units('nova-compute', + model_name=self.model_name) + + # Check that nova-compute services are enabled before testing + for service in self.nova_client.services.list(binary='nova-compute'): + self.assertEqual(service.status, 'enabled') + + # Run 'disable' action on units + zaza.model.run_action_on_units([unit.name for unit in nova_units], + 'disable') + + # Check action results via nova API + for service in self.nova_client.services.list(binary='nova-compute'): + self.assertEqual(service.status, 'disabled') + + # Run 'enable' action on units + zaza.model.run_action_on_units([unit.name for unit in nova_units], + 'enable') + + # Check action results via nova API + for service in self.nova_client.services.list(binary='nova-compute'): + self.assertEqual(service.status, 'enabled') + + def test_950_instance_count_action(self): + """Test that action 'instance-count' returns expected values.""" + def check_instance_count(expect_count, unit_name): + """Assert that unit with 'unit_name' has 'expect_count' of VMs. + + :param expect_count: How many VMs are expected to be running + :param unit_name: Name of the target nova-compute unit + :return: None + :raises AssertionError: If result of the 'instance-count' action + does not match 'expect_count'. + """ + logging.debug('Running "instance-count" action on unit "{}".' + 'Expecting result: {}'.format(unit_name, + expect_count)) + result = zaza.model.run_action(unit_name, 'instance-count') + self.assertEqual(result.status, 'completed') + instances = result.data.get('results', {}).get('instance-count') + self.assertEqual(instances, str(expect_count)) + + nova_unit = zaza.model.get_units('nova-compute', + model_name=self.model_name)[0] + + check_instance_count(0, nova_unit.entity_id) + + self.RESOURCE_PREFIX = 'zaza-nova' + self.launch_guest( + 'ubuntu', instance_key=glance_setup.LTS_IMAGE_NAME) + + check_instance_count(1, nova_unit.entity_id) + + self.resource_cleanup() + + def test_960_remove_from_cloud_actions(self): + """Test actions remove-from-cloud and register-to-cloud. + + Note (martin-kalcok): This test requires that nova-compute unit is not + running any VMs. If there are any leftover VMs from previous tests, + action `remove-from-cloud` will fail. + """ + def wait_for_nova_compute_count(expected_count): + """Wait for expected number of nova compute services to be present. + + Returns True or False based on whether the expected number of nova + compute services was reached within the timeout. Checks are + performed every 10 second in the span of maximum 5 minutes. + """ + sleep_timeout = 1 # don't waste 10 seconds on the first run + + for _ in range(31): + sleep(sleep_timeout) + service_list = self.nova_client.services.list( + host=service_name, binary='nova-compute') + if len(service_list) == expected_count: + return True + sleep_timeout = 10 + return False + + all_units = zaza.model.get_units('nova-compute', + model_name=self.model_name) + + unit_to_remove = all_units[0] + + service_name = self.fetch_nova_service_hostname(unit_to_remove.name) + + registered_nova_services = self.nova_client.services.list( + host=service_name, binary='nova-compute') + + service_count = len(registered_nova_services) + if service_count < 1: + self.fail("Unit '{}' has no nova-compute services registered in" + " nova-cloud-controller".format(unit_to_remove.name)) + elif service_count > 1: + self.fail("Unexpected number of nova-compute services registered" + " in nova-cloud controller. Expecting: 1, found: " + "{}".format(service_count)) + + # run action remove-from-cloud and wait for the results in + # nova-cloud-controller + zaza.model.run_action_on_units([unit_to_remove.name], + 'remove-from-cloud', + raise_on_failure=True) + + # Wait for nova-compute service to be removed from the + # nova-cloud-controller + if not wait_for_nova_compute_count(0): + self.fail("nova-compute service was not unregistered from the " + "nova-cloud-controller as expected.") + + # run action register-to-cloud to revert previous action + # and wait for the results in nova-cloud-controller + zaza.model.run_action_on_units([unit_to_remove.name], + 'register-to-cloud', + raise_on_failure=True) + + if not wait_for_nova_compute_count(1): + self.fail("nova-compute service was not re-registered to the " + "nova-cloud-controller as expected.") + + +class NovaCompute(NovaCommonTests): """Run nova-compute specific tests.""" + def test_311_pci_alias_config_compute(self): + """Verify that the pci alias data is rendered properly. + + Change pci-alias and assert that change propagates to the correct + file and that services are restarted as a result + """ + # We are not touching the behavior of anything older than QUEENS + if self.current_release >= self.XENIAL_QUEENS: + self._test_pci_alias_config("nova-compute", ['nova-compute']) + def test_500_hugepagereport_action(self): """Test hugepagereport action.""" for unit in zaza.model.get_units('nova-compute', @@ -75,32 +346,16 @@ class NovaCompute(test_utils.OpenStackBaseTest): def test_900_restart_on_config_change(self): """Checking restart happens on config change. - Change disk format and assert then change propagates to the correct + Change debug mode and assert that change propagates to the correct file and that services are restarted as a result """ - # Expected default and alternate values - current_value = zaza.model.get_application_config( - 'nova-compute')['debug']['value'] - new_value = str(not bool(current_value)).title() - current_value = str(current_value).title() - - set_default = {'debug': current_value} - set_alternate = {'debug': new_value} - default_entry = {'DEFAULT': {'debug': [current_value]}} - alternate_entry = {'DEFAULT': {'debug': [new_value]}} - # Config file affected by juju set config change conf_file = '/etc/nova/nova.conf' # Make config change, check for service restarts - logging.info( - 'Setting verbose on nova-compute {}'.format(set_alternate)) - self.restart_on_changed( + logging.info('Changing the debug config on nova-compute') + self.restart_on_changed_debug_oslo_config_file( conf_file, - set_default, - set_alternate, - default_entry, - alternate_entry, ['nova-compute']) def test_920_change_aa_profile(self): @@ -156,12 +411,325 @@ class NovaCompute(test_utils.OpenStackBaseTest): self.assertFalse(int(run['Code']) == 0) +class NovaComputeActionTest(test_utils.OpenStackBaseTest): + """Run nova-compute specific tests. + + Add this test class for new nova-compute action + to avoid breaking older version + """ + + def test_virsh_audit_action(self): + """Test virsh-audit action.""" + for unit in zaza.model.get_units('nova-compute', + model_name=self.model_name): + logging.info('Running `virsh-audit` action' + ' on unit {}'.format(unit.entity_id)) + action = zaza.model.run_action( + unit.entity_id, + 'virsh-audit', + model_name=self.model_name, + action_params={}) + if "failed" in action.data["status"]: + raise Exception( + "The action failed: {}".format(action.data["message"])) + + +class NovaCloudControllerActionTest(test_utils.OpenStackBaseTest): + """Run nova-cloud-controller specific tests. + + Add this test class for new nova-cloud-controller action + to avoid breaking older version. + """ + + def test_sync_compute_az_action(self): + """Test sync-compute-availability-zones action.""" + juju_units_az_map = {} + compute_config = zaza.model.get_application_config('nova-compute') + default_az = compute_config['default-availability-zone']['value'] + use_juju_az = compute_config['customize-failure-domain']['value'] + + for unit in zaza.model.get_units('nova-compute', + model_name=self.model_name): + zone = default_az + if use_juju_az: + result = zaza.model.run_on_unit(unit.name, + 'echo $JUJU_AVAILABILITY_ZONE', + model_name=self.model_name, + timeout=60) + self.assertEqual(int(result['Code']), 0) + juju_az = result['Stdout'].strip() + if juju_az: + zone = juju_az + + juju_units_az_map[unit.public_address] = zone + continue + + session = openstack_utils.get_overcloud_keystone_session() + nova = openstack_utils.get_nova_session_client(session) + + result = zaza.model.run_action_on_leader( + 'nova-cloud-controller', + 'sync-compute-availability-zones', + model_name=self.model_name) + + # For validating the action results, we simply want to validate that + # the action was completed and we have something in the output. The + # functional validation really occurs below, in that the hosts are + # checked to be in the appropriate host aggregates. + self.assertEqual(result.status, 'completed') + self.assertNotEqual('', result.results['output']) + + unique_az_list = list(set(juju_units_az_map.values())) + aggregates = nova.aggregates.list() + self.assertEqual(len(aggregates), len(unique_az_list)) + for unit_address in juju_units_az_map: + az = juju_units_az_map[unit_address] + aggregate = nova.aggregates.find( + name='{}_az'.format(az), availability_zone=az) + hypervisor = nova.hypervisors.find(host_ip=unit_address) + self.assertIn(hypervisor.hypervisor_hostname, aggregate.hosts) + + +class NovaCloudController(NovaCommonTests): + """Run nova-cloud-controller specific tests.""" + + @property + def services(self): + """Return a list of services for the selected OpenStack release.""" + services = ['nova-scheduler', 'nova-conductor'] + if self.current_release <= self.BIONIC_QUEENS: + services.append('nova-api-os-compute') + if self.current_release <= self.XENIAL_MITAKA: + services.append('nova-cert') + if self.current_release >= self.XENIAL_OCATA: + services.append('apache2') + return services + + def test_104_compute_api_functionality(self): + """Verify basic compute API functionality.""" + logging.info('Instantiating nova client...') + keystone_session = openstack_utils.get_overcloud_keystone_session() + nova = openstack_utils.get_nova_session_client(keystone_session) + + logging.info('Checking api functionality...') + + actual_service_names = [service.to_dict()['binary'] for service in + nova.services.list()] + for expected_service_name in ('nova-scheduler', 'nova-conductor', + 'nova-compute'): + assert(expected_service_name in actual_service_names) + + # Thanks to setup.create_flavors we should have a few flavors already: + assert(len(nova.flavors.list()) > 0) + + # Just checking it's not raising and returning an iterable: + assert(len(nova.servers.list()) >= 0) + + def test_106_compute_catalog_endpoints(self): + """Verify that the compute endpoints are present in the catalog.""" + overcloud_auth = openstack_utils.get_overcloud_auth() + keystone_client = openstack_utils.get_keystone_client( + overcloud_auth) + actual_endpoints = keystone_client.service_catalog.get_endpoints() + + logging.info('Checking compute endpoints...') + + if self.current_release < self.XENIAL_QUEENS: + actual_compute_endpoints = actual_endpoints['compute'][0] + for expected_url in ('internalURL', 'adminURL', 'publicURL'): + assert(expected_url in actual_compute_endpoints) + else: + actual_compute_interfaces = [endpoint['interface'] for endpoint in + actual_endpoints['compute']] + for expected_interface in ('internal', 'admin', 'public'): + assert(expected_interface in actual_compute_interfaces) + + def test_220_nova_metadata_propagate(self): + """Verify that the vendor-data settings are propagated. + + Change vendor-data-url and assert that change propagates to the correct + file and that services are restarted as a result + """ + if self.current_release < self.BIONIC_ROCKY: + logging.info("Feature didn't exist before Rocky. Nothing to test") + return + + # Expected default and alternate values + current_value = zaza.model.get_application_config( + 'nova-cloud-controller')['vendor-data-url']['value'] + new_value = 'http://some-other.url/vdata' + + set_default = {'vendor-data-url': current_value} + set_alternate = {'vendor-data-url': new_value} + default_entry = {'api': { + 'vendordata_dynamic_targets': [current_value]}} + alternate_entry = {'api': {'vendordata_dynamic_targets': [new_value]}} + + # Config file affected by juju set config change + conf_file = '/etc/nova/nova.conf' + + # Make config change, check for service restarts + logging.info( + 'Setting config on nova-cloud-controller to {}'.format( + set_alternate)) + self.restart_on_changed( + conf_file, + set_default, + set_alternate, + default_entry, + alternate_entry, + self.services) + + def test_302_api_rate_limiting_is_enabled(self): + """Check that API rate limiting is enabled.""" + logging.info('Checking api-paste config file data...') + zaza.model.block_until_oslo_config_entries_match( + 'nova-cloud-controller', '/etc/nova/api-paste.ini', { + 'filter:legacy_ratelimit': { + 'limits': ["( POST, '*', .*, 9999, MINUTE );"]}}) + + def test_310_pci_alias_config_ncc(self): + """Verify that the pci alias data is rendered properly. + + Change pci-alias and assert that change propagates to the correct + file and that services are restarted as a result + """ + self._test_pci_alias_config("nova-cloud-controller", self.services) + + def test_900_restart_on_config_change(self): + """Checking restart happens on config change. + + Change debug mode and assert that change propagates to the correct + file and that services are restarted as a result + """ + # Config file affected by juju set config change + conf_file = '/etc/nova/nova.conf' + + # Make config change, check for service restarts + logging.info('Changing debug config on nova-cloud-controller') + self.restart_on_changed_debug_oslo_config_file( + conf_file, + self.services) + + def test_901_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + """ + with self.pause_resume(self.services): + logging.info("Testing pause resume") + + def test_902_quota_settings(self): + """Verify that the quota settings are propagated. + + Change quota-instances and assert that change propagates to the correct + file and that services are restarted as a result + """ + # Expected default and alternate values + current_value = zaza.model.get_application_config( + 'nova-cloud-controller')['quota-instances'] + try: + current_value = current_value['value'] + except KeyError: + current_value = 0 + new_value = '20' + + set_default = {'quota-instances': current_value} + set_alternate = {'quota-instances': new_value} + + expected_conf_section = 'DEFAULT' + expected_conf_key = 'quota_instances' + if self.current_release >= self.XENIAL_OCATA: + expected_conf_section = 'quota' + expected_conf_key = 'instances' + + default_entry = {expected_conf_section: {}} + alternate_entry = {expected_conf_section: { + expected_conf_key: [new_value]}} + + # Config file affected by juju set config change + conf_file = '/etc/nova/nova.conf' + + # Make config change, check for service restarts + logging.info( + 'Setting config on nova-cloud-controller to {}'.format( + set_alternate)) + self.restart_on_changed( + conf_file, + set_default, + set_alternate, + default_entry, + alternate_entry, + self.services) + + def test_903_enable_quota_count_usage_from_placement(self): + """Verify that quota-count-usage-from-placement is propagated. + + Change quota-count-usage-from-placement and assert that nova + configuration file is updated and the services are restarted. + This parameter is not supported for releases Amphorae communication must + be generated and set trough charm configuration. + + The optional SSH configuration options are set to enable debug and log + collection from Amphorae, we will use the same keypair as Zaza uses for + instance creation. + + The `configure-resources` action must be run to have the charm create + in-cloud resources such as management network and associated ports and + security groups. + """ + # Set up Nova client to create/retrieve keypair for Amphora debug purposes. + # + # We reuse the Nova setup code for this and in most cases the test + # declaration will already defined that the Nova manage_ssh_key setup + # helper to run before we get here. Re-run here to make sure this setup + # function can be used separately, manage_ssh_key is idempotent. + keystone_session = openstack.get_overcloud_keystone_session() + nova_client = openstack.get_nova_session_client( + keystone_session) + nova_setup.manage_ssh_key(nova_client) + ssh_public_key = openstack.get_public_key( + nova_client, nova_utils.KEYPAIR_NAME) + # Generate certificates for controller/load balancer instance communication (issuing_cakey, issuing_cacert) = cert.generate_cert( 'OSCI Zaza Issuer', @@ -65,7 +93,7 @@ def configure_octavia(): issuer_name='OSCI Zaza Octavia Controller', signing_key=controller_cakey) controller_bundle = controller_cert + controller_key - cert_config = { + charm_config = { 'lb-mgmt-issuing-cacert': base64.b64encode( issuing_cacert).decode('utf-8'), 'lb-mgmt-issuing-ca-private-key': base64.b64encode( @@ -75,40 +103,44 @@ def configure_octavia(): controller_cacert).decode('utf-8'), 'lb-mgmt-controller-cert': base64.b64encode( controller_bundle).decode('utf-8'), + 'amp-ssh-key-name': 'octavia', + 'amp-ssh-pub-key': base64.b64encode( + bytes(ssh_public_key, 'utf-8')).decode('utf-8'), } - logging.info('Configuring certificates for mandatory Octavia ' - 'client/server authentication ' - '(client being the ``Amphorae`` load balancer instances)') + + # Tell Octavia charm it is safe to create cloud resources, we do this now + # because the workload status will be checked on config-change and it gets + # a bit complicated to augment test config to accept 'blocked' vs. 'active' + # in the various stages. + logging.info('Running `configure-resources` action on Octavia leader unit') + zaza.model.run_action_on_leader( + 'octavia', + 'configure-resources', + action_params={}) # Our expected workload status will change after we have configured the # certificates test_config = zaza.charm_lifecycle.utils.get_charm_config() del test_config['target_deploy_status']['octavia'] + logging.info('Configuring certificates for mandatory Octavia ' + 'client/server authentication ' + '(client being the ``Amphorae`` load balancer instances)') + _singleton = zaza.openstack.charm_tests.test_utils.OpenStackBaseTest() - _singleton.setUpClass() - with _singleton.config_change(cert_config, cert_config): + _singleton.setUpClass(application_name='octavia') + with _singleton.config_change(charm_config, charm_config): # wait for configuration to be applied then return pass - -def prepare_payload_instance(): - """Prepare a instance we can use as payload test.""" - session = openstack.get_overcloud_keystone_session() - keystone = openstack.get_keystone_session_client(session) - neutron = openstack.get_neutron_session_client(session) - project_id = openstack.get_project_id( - keystone, 'admin', domain_name='admin_domain') - openstack.add_neutron_secgroup_rules( - neutron, - project_id, - [{'protocol': 'tcp', - 'port_range_min': '80', - 'port_range_max': '80', - 'direction': 'ingress'}]) - zaza.openstack.configure.guest.launch_instance( - glance_setup.LTS_IMAGE_NAME, - userdata='#cloud-config\npackages:\n - apache2\n') + # Should we consider making the charm attempt to create this key on + # config-change? + logging.info('Running `configure-resources` action again to ensure ' + 'Octavia Nova SSH key pair is created after config change.') + zaza.model.run_action_on_leader( + 'octavia', + 'configure-resources', + action_params={}) def centralized_fip_network(): @@ -137,6 +169,9 @@ def centralized_fip_network(): 4: https://review.opendev.org/#/c/437986/ 5: https://review.opendev.org/#/c/466434/ """ + if not openstack.dvr_enabled(): + logging.info('DVR not enabled, skip.') + return keystone_session = openstack.get_overcloud_keystone_session() neutron_client = openstack.get_neutron_session_client( keystone_session) diff --git a/zaza/openstack/charm_tests/octavia/tests.py b/zaza/openstack/charm_tests/octavia/tests.py index 3c5ee71..295bd51 100644 --- a/zaza/openstack/charm_tests/octavia/tests.py +++ b/zaza/openstack/charm_tests/octavia/tests.py @@ -18,9 +18,83 @@ import logging import subprocess import tenacity +from keystoneauth1 import exceptions as keystone_exceptions +import octaviaclient.api.v2.octavia +import osc_lib.exceptions + import zaza.openstack.charm_tests.test_utils as test_utils import zaza.openstack.utilities.openstack as openstack_utils +from zaza.openstack.utilities import ObjectRetrierWraps + +LBAAS_ADMIN_ROLE = 'load-balancer_admin' + + +def _op_role_current_user(keystone_client, keystone_session, op, role_name, + scope=None): + """Perform role operation on current user. + + :param keystone_client: Keysonte cilent object + :type keystone_client: keystoneclient.v3.Client + :param keystone_session: Keystone session object + :type keystone_session: keystoneauth1.session.Session + :param op: Operation to perform, one of ('grant', 'revoke') + :type op: str + :param role_name: Name of role + :type role_name: str + :param scope: Scope to apply role to, one of ('domain', 'project'(default)) + :type scope: Optional[str] + :returns: the granted role returned from server. + :rtype: keystoneclient.v3.roles.Role + :raises: ValueError, keystoneauth1.exceptions.* + """ + allowed_ops = ('grant', 'revoke') + if op not in allowed_ops: + raise ValueError('op "{}" not in allowed_ops "{}"' + .format(op, allowed_ops)) + scope = scope or 'project' + allowed_scope = ('domain', 'project') + if scope not in allowed_scope: + raise ValueError('scope "{}" not in allowed_scope "{}"' + .format(scope, allowed_scope)) + + logging.info('{} "{}" role {} current user with "{}" scope...' + .format(op.capitalize(), role_name, + 'to' if op == 'grant' else 'from', + scope)) + role_method = getattr(keystone_client.roles, op) + token = keystone_session.get_token() + token_data = keystone_client.tokens.get_token_data(token) + role = keystone_client.roles.find(name=role_name) + + kwargs = { + 'user': token_data['token']['user']['id'], + scope: token_data['token'][scope]['id'], + } + return role_method( + role, + **kwargs) + + +def grant_role_current_user(keystone_client, keystone_session, role_name, + scope=None): + """Grant role to current user. + + Please refer to docstring for _op_role_current_user. + """ + _op_role_current_user( + keystone_client, keystone_session, 'grant', role_name, scope=scope) + + +def revoke_role_current_user(keystone_client, keystone_session, role_name, + scope=None): + """Grant role to current user. + + Please refer to docstring for _op_role_current_user. + """ + _op_role_current_user( + keystone_client, keystone_session, 'revoke', role_name, scope=scope) + class CharmOperationTest(test_utils.OpenStackBaseTest): """Charm operation tests.""" @@ -36,7 +110,20 @@ class CharmOperationTest(test_utils.OpenStackBaseTest): Pause service and check services are stopped, then resume and check they are started. """ - self.pause_resume(['apache2']) + services = [ + 'apache2', + 'octavia-health-manager', + 'octavia-housekeeping', + 'octavia-worker', + ] + if openstack_utils.ovn_present(): + services.append('octavia-driver-agent') + logging.info('Skipping pause resume test LP: #1886202...') + return + logging.info('Testing pause resume (services="{}")' + .format(services)) + with self.pause_resume(services, pgrep_full=True): + pass class LBAASv2Test(test_utils.OpenStackBaseTest): @@ -46,58 +133,168 @@ class LBAASv2Test(test_utils.OpenStackBaseTest): def setUpClass(cls): """Run class setup for running LBaaSv2 service tests.""" super(LBAASv2Test, cls).setUpClass() + cls.keystone_client = ObjectRetrierWraps( + openstack_utils.get_keystone_session_client(cls.keystone_session)) - def test_create_loadbalancer(self): - """Create load balancer.""" - keystone_session = openstack_utils.get_overcloud_keystone_session() - neutron_client = openstack_utils.get_neutron_session_client( - keystone_session) - nova_client = openstack_utils.get_nova_session_client( - keystone_session) + if (openstack_utils.get_os_release() >= + openstack_utils.get_os_release('focal_wallaby')): + # add role to admin user for the duration of the test + grant_role_current_user(cls.keystone_client, cls.keystone_session, + LBAAS_ADMIN_ROLE) - # Get IP of the prepared payload instances - payload_ips = [] - for server in nova_client.servers.list(): - payload_ips.append(server.networks['private'][0]) - self.assertTrue(len(payload_ips) > 0) + cls.neutron_client = ObjectRetrierWraps( + openstack_utils.get_neutron_session_client(cls.keystone_session)) + cls.octavia_client = ObjectRetrierWraps( + openstack_utils.get_octavia_session_client(cls.keystone_session)) + cls.RESOURCE_PREFIX = 'zaza-octavia' + + # NOTE(fnordahl): in the event of a test failure we do not want to run + # tear down code as it will make debugging a problem virtually + # impossible. To alleviate each test method will set the + # `run_tearDown` instance variable at the end which will let us run + # tear down only when there were no failure. + cls.run_tearDown = False + # List of load balancers created by this test + cls.loadbalancers = [] + # List of floating IPs created by this test + cls.fips = [] + + def _remove_amphorae_instances(self): + """Remove amphorae instances forcefully. + + In some situations Octavia is unable to remove load balancer resources. + This helper can be used to remove the underlying instances. + """ + result = self.octavia_client.amphora_list() + for amphora in result.get('amphorae', []): + for server in self.nova_client.servers.list(): + if 'compute_id' in amphora and server.id == amphora[ + 'compute_id']: + try: + openstack_utils.delete_resource( + self.nova_client.servers, + server.id, + msg="server") + except AssertionError as e: + logging.warning( + 'Gave up waiting for resource cleanup: "{}"' + .format(str(e))) + + @tenacity.retry(stop=tenacity.stop_after_attempt(3), + wait=tenacity.wait_exponential( + multiplier=1, min=2, max=10)) + def resource_cleanup(self, only_local=False): + """Remove resources created during test execution. + + :param only_local: When set to true do not call parent method + :type only_local: bool + """ + for lb in self.loadbalancers: + try: + self.octavia_client.load_balancer_delete( + lb['id'], cascade=True) + except octaviaclient.api.v2.octavia.OctaviaClientException as e: + logging.info('Octavia is unable to delete load balancer: "{}"' + .format(e)) + logging.info('Attempting to forcefully remove amphorae') + self._remove_amphorae_instances() + else: + try: + self.wait_for_lb_resource( + self.octavia_client.load_balancer_show, lb['id'], + provisioning_status='DELETED') + except osc_lib.exceptions.NotFound: + pass + # allow resource cleanup to be run multiple times + self.loadbalancers = [] + + if (openstack_utils.get_os_release() >= + openstack_utils.get_os_release('focal_wallaby')): + # revoke role from admin user added by this test + revoke_role_current_user(self.keystone_client, + self.keystone_session, + LBAAS_ADMIN_ROLE) + + for fip in self.fips: + self.neutron_client.delete_floatingip(fip) + # allow resource cleanup to be run multiple times + self.fips = [] + + if only_local: + return + + # we run the parent resource_cleanup last as it will remove instances + # referenced as members in the above cleaned up load balancers + super(LBAASv2Test, self).resource_cleanup() + + @staticmethod + @tenacity.retry(retry=tenacity.retry_if_exception_type(AssertionError), + wait=tenacity.wait_fixed(1), reraise=True, + stop=tenacity.stop_after_delay(900)) + def wait_for_lb_resource(octavia_show_func, resource_id, + provisioning_status=None, operating_status=None): + """Wait for loadbalancer resource to reach expected status.""" + provisioning_status = provisioning_status or 'ACTIVE' + resp = octavia_show_func(resource_id) + logging.info(resp['provisioning_status']) + assert resp['provisioning_status'] == provisioning_status, ( + 'load balancer resource has not reached ' + 'expected provisioning status: {}' + .format(resp)) + if operating_status: + logging.info(resp['operating_status']) + assert resp['operating_status'] == operating_status, ( + 'load balancer resource has not reached ' + 'expected operating status: {}'.format(resp)) + + return resp + + @staticmethod + def get_lb_providers(octavia_client): + """Retrieve loadbalancer providers. + + :param octavia_client: Octavia client object + :type octavia_client: OctaviaAPI + :returns: Dictionary with provider information, name as keys + :rtype: Dict[str,Dict[str,str]] + """ + providers = { + provider['name']: provider + for provider in octavia_client.provider_list().get('providers', []) + if provider['name'] != 'octavia' # alias for `amphora`, skip + } + return providers + + def _create_lb_resources(self, octavia_client, provider, vip_subnet_id, + member_subnet_id, payload_ips): + # The `amphora` provider is required for load balancing based on + # higher layer protocols + if provider == 'amphora': + protocol = 'HTTP' + algorithm = 'ROUND_ROBIN' + monitor = True + else: + protocol = 'TCP' + algorithm = 'SOURCE_IP_PORT' + monitor = False - resp = neutron_client.list_networks(name='private_lb_fip_network') - vip_subnet_id = resp['networks'][0]['subnets'][0] - resp = neutron_client.list_networks(name='private') - subnet_id = resp['networks'][0]['subnets'][0] - octavia_client = openstack_utils.get_octavia_session_client( - keystone_session) result = octavia_client.load_balancer_create( json={ 'loadbalancer': { 'description': 'Created by Zaza', 'admin_state_up': True, 'vip_subnet_id': vip_subnet_id, - 'name': 'zaza-lb-0', + 'name': 'zaza-{}-0'.format(provider), + 'provider': provider, }}) - lb_id = result['loadbalancer']['id'] - lb_vip_port_id = result['loadbalancer']['vip_port_id'] + lb = result['loadbalancer'] + self.loadbalancers.append(lb) + lb_id = lb['id'] - @tenacity.retry(wait=tenacity.wait_fixed(1), - reraise=True, stop=tenacity.stop_after_delay(900)) - def wait_for_lb_resource(octavia_show_func, resource_id, - operating_status=None): - resp = octavia_show_func(resource_id) - logging.info(resp['provisioning_status']) - assert resp['provisioning_status'] == 'ACTIVE', ( - 'load balancer resource has not reached ' - 'expected provisioning status: {}' - .format(resp)) - if operating_status: - logging.info(resp['operating_status']) - assert resp['operating_status'] == operating_status, ( - 'load balancer resource has not reached ' - 'expected operating status: {}'.format(resp)) - - return resp logging.info('Awaiting loadbalancer to reach provisioning_status ' '"ACTIVE"') - resp = wait_for_lb_resource(octavia_client.load_balancer_show, lb_id) + resp = self.wait_for_lb_resource( + octavia_client.load_balancer_show, lb_id) logging.info(resp) result = octavia_client.listener_create( @@ -105,14 +302,15 @@ class LBAASv2Test(test_utils.OpenStackBaseTest): 'listener': { 'loadbalancer_id': lb_id, 'name': 'listener1', - 'protocol': 'HTTP', + 'protocol': protocol, 'protocol_port': 80 }, }) listener_id = result['listener']['id'] logging.info('Awaiting listener to reach provisioning_status ' '"ACTIVE"') - resp = wait_for_lb_resource(octavia_client.listener_show, listener_id) + resp = self.wait_for_lb_resource( + octavia_client.listener_show, listener_id) logging.info(resp) result = octavia_client.pool_create( @@ -120,40 +318,42 @@ class LBAASv2Test(test_utils.OpenStackBaseTest): 'pool': { 'listener_id': listener_id, 'name': 'pool1', - 'lb_algorithm': 'ROUND_ROBIN', - 'protocol': 'HTTP', + 'lb_algorithm': algorithm, + 'protocol': protocol, }, }) pool_id = result['pool']['id'] logging.info('Awaiting pool to reach provisioning_status ' '"ACTIVE"') - resp = wait_for_lb_resource(octavia_client.pool_show, pool_id) + resp = self.wait_for_lb_resource(octavia_client.pool_show, pool_id) logging.info(resp) - result = octavia_client.health_monitor_create( - json={ - 'healthmonitor': { - 'pool_id': pool_id, - 'delay': 5, - 'max_retries': 4, - 'timeout': 10, - 'type': 'HTTP', - 'url_path': '/', - }, - }) - healthmonitor_id = result['healthmonitor']['id'] - logging.info('Awaiting healthmonitor to reach provisioning_status ' - '"ACTIVE"') - resp = wait_for_lb_resource(octavia_client.health_monitor_show, - healthmonitor_id) - logging.info(resp) + if monitor: + result = octavia_client.health_monitor_create( + json={ + 'healthmonitor': { + 'pool_id': pool_id, + 'delay': 5, + 'max_retries': 4, + 'timeout': 10, + 'type': 'HTTP', + 'url_path': '/', + }, + }) + healthmonitor_id = result['healthmonitor']['id'] + logging.info('Awaiting healthmonitor to reach provisioning_status ' + '"ACTIVE"') + resp = self.wait_for_lb_resource( + octavia_client.health_monitor_show, + healthmonitor_id) + logging.info(resp) for ip in payload_ips: result = octavia_client.member_create( pool_id=pool_id, json={ 'member': { - 'subnet_id': subnet_id, + 'subnet_id': member_subnet_id, 'address': ip, 'protocol_port': 80, }, @@ -161,25 +361,95 @@ class LBAASv2Test(test_utils.OpenStackBaseTest): member_id = result['member']['id'] logging.info('Awaiting member to reach provisioning_status ' '"ACTIVE"') - resp = wait_for_lb_resource( + resp = self.wait_for_lb_resource( lambda x: octavia_client.member_show( pool_id=pool_id, member_id=x), member_id, - operating_status='ONLINE') + operating_status='') + # Temporarily disable this check until we figure out why + # operational_status sometimes does not become 'ONLINE' + # while the member does indeed work and the subsequent + # retrieval of payload through loadbalancer is successful + # ref LP: #1896729. + # operating_status='ONLINE' if monitor else '') logging.info(resp) + return lb - lb_fp = openstack_utils.create_floating_ip( - neutron_client, 'ext_net', port={'id': lb_vip_port_id}) + @staticmethod + @tenacity.retry(wait=tenacity.wait_fixed(1), + reraise=True, stop=tenacity.stop_after_delay(900)) + def _get_payload(ip): + return subprocess.check_output( + ['wget', '-O', '-', + 'http://{}/'.format(ip)], + universal_newlines=True) - @tenacity.retry(wait=tenacity.wait_fixed(1), - reraise=True, stop=tenacity.stop_after_delay(900)) - def get_payload(): - return subprocess.check_output( - ['wget', '-O', '-', - 'http://{}/'.format(lb_fp['floating_ip_address'])], - universal_newlines=True) - snippet = 'This is the default welcome page' - assert snippet in get_payload() - logging.info('Found "{}" in page retrieved through load balancer at ' - '"http://{}/"' - .format(snippet, lb_fp['floating_ip_address'])) + def test_create_loadbalancer(self): + """Create load balancer.""" + # Prepare payload instances + # First we allow communication to port 80 by adding a security group + # rule + project_id = openstack_utils.get_project_id( + self.keystone_client, 'admin', domain_name='admin_domain') + openstack_utils.add_neutron_secgroup_rules( + self.neutron_client, + project_id, + [{'protocol': 'tcp', + 'port_range_min': '80', + 'port_range_max': '80', + 'direction': 'ingress'}]) + + # Then we request two Ubuntu instances with the Apache web server + # installed + instance_1, instance_2 = self.launch_guests( + userdata='#cloud-config\npackages:\n - apache2\n') + + # Get IP of the prepared payload instances + payload_ips = [] + for server in (instance_1, instance_2): + payload_ips.append(server.networks['private'][0]) + self.assertTrue(len(payload_ips) > 0) + + resp = self.neutron_client.list_networks(name='private') + subnet_id = resp['networks'][0]['subnets'][0] + if openstack_utils.dvr_enabled(): + resp = self.neutron_client.list_networks( + name='private_lb_fip_network') + vip_subnet_id = resp['networks'][0]['subnets'][0] + else: + vip_subnet_id = subnet_id + for provider in self.get_lb_providers(self.octavia_client).keys(): + logging.info('Creating loadbalancer with provider {}' + .format(provider)) + final_exc = None + # NOTE: we cannot use tenacity here as the method we call into + # already uses it to wait for operations to complete. + for retry in range(0, 3): + try: + lb = self._create_lb_resources(self.octavia_client, + provider, + vip_subnet_id, + subnet_id, + payload_ips) + break + except (AssertionError, + keystone_exceptions.connection.ConnectFailure) as e: + logging.info('Retrying load balancer creation, last ' + 'failure: "{}"'.format(str(e))) + self.resource_cleanup(only_local=True) + final_exc = e + else: + raise final_exc + + lb_fp = openstack_utils.create_floating_ip( + self.neutron_client, 'ext_net', port={'id': lb['vip_port_id']}) + + snippet = 'This is the default welcome page' + assert snippet in self._get_payload(lb_fp['floating_ip_address']) + logging.info('Found "{}" in page retrieved through load balancer ' + ' (provider="{}") at "http://{}/"' + .format(snippet, provider, + lb_fp['floating_ip_address'])) + + # If we get here, it means the tests passed + self.run_resource_cleanup = True diff --git a/zaza/openstack/charm_tests/openstack_dashboard/__init__.py b/zaza/openstack/charm_tests/openstack_dashboard/__init__.py new file mode 100644 index 0000000..36f23e9 --- /dev/null +++ b/zaza/openstack/charm_tests/openstack_dashboard/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and openstack-dasboard.""" diff --git a/zaza/openstack/charm_tests/openstack_dashboard/tests.py b/zaza/openstack/charm_tests/openstack_dashboard/tests.py new file mode 100644 index 0000000..5cd341c --- /dev/null +++ b/zaza/openstack/charm_tests/openstack_dashboard/tests.py @@ -0,0 +1,553 @@ +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate horizon (openstack-dashboard) charm testing.""" + +import base64 +import http.client +import logging +import requests +import tenacity +import urllib.request +import yaml + +import zaza.model as zaza_model + +import zaza.openstack.utilities.openstack as openstack_utils +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.generic as generic_utils +import zaza.openstack.charm_tests.policyd.tests as policyd + + +class AuthExceptions(Exception): + """Exception base class for the 401 test.""" + + pass + + +class FailedAuth(AuthExceptions): + """Failed exception for the 401 test.""" + + pass + + +# NOTE: intermittent authentication fails. Wrap in a retry loop. +@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, + min=5, max=10), + reraise=True) +def _login(dashboard_url, domain, username, password, cafile=None): + """Login to the website to get a session. + + :param dashboard_url: The URL of the dashboard to log in to. + :type dashboard_url: str + :param domain: the domain to login into + :type domain: str + :param username: the username to login as + :type username: str + :param password: the password to use to login + :type password: str + :returns: tuple of (client, response) where response is the page after + logging in. + :rtype: (requests.sessions.Session, requests.models.Response) + :raises: FailedAuth if the authorisation doesn't work + """ + auth_url = '{}/auth/login/'.format(dashboard_url) + + # start session, get csrftoken + client = requests.session() + client.get(auth_url, verify=cafile) + if 'csrftoken' in client.cookies: + csrftoken = client.cookies['csrftoken'] + else: + raise Exception("Missing csrftoken") + + # build and send post request + overcloud_auth = openstack_utils.get_overcloud_auth() + + if overcloud_auth['OS_AUTH_URL'].endswith("v2.0"): + api_version = 2 + else: + api_version = 3 + keystone_client = openstack_utils.get_keystone_client( + overcloud_auth) + catalog = keystone_client.service_catalog.get_endpoints() + logging.info(catalog) + if api_version == 2: + region = catalog['identity'][0]['publicURL'] + else: + region = [i['url'] + for i in catalog['identity'] + if i['interface'] == 'public'][0] + + auth = { + 'domain': domain, + 'username': username, + 'password': password, + 'csrfmiddlewaretoken': csrftoken, + 'next': '/horizon/', + 'region': region, + } + + # In the minimal test deployment /horizon/project/ is unauthorized, + # this does not occur in a full deployment and is probably due to + # services/information missing that horizon wants to display data + # for. + # Redirect to /horizon/identity/ instead. + if (openstack_utils.get_os_release() >= + openstack_utils.get_os_release('xenial_queens')): + auth['next'] = '/horizon/identity/' + + if (openstack_utils.get_os_release() >= + openstack_utils.get_os_release('bionic_stein')): + auth['region'] = 'default' + + if api_version == 2: + del auth['domain'] + + logging.info('POST data: "{}"'.format(auth)) + response = client.post( + auth_url, + data=auth, + headers={'Referer': auth_url}, + verify=cafile) + + # NOTE(ajkavanagh) there used to be a trusty/icehouse test in the + # amulet test, but as the zaza tests only test from trusty/mitaka + # onwards, the test has been dropped + if (openstack_utils.get_os_release() >= + openstack_utils.get_os_release('bionic_stein')): + expect = "Sign Out" + # update the in dashboard seems to require region to be default in + # this test configuration + region = 'default' + else: + expect = 'Projects - OpenStack Dashboard' + + if expect not in response.text: + msg = 'FAILURE code={} text="{}"'.format(response, + response.text) + logging.info("Yeah, wen't wrong: {}".format(msg)) + raise FailedAuth(msg) + logging.info("Logged into okay") + return client, response + + +# NOTE(ajkavanagh): it seems that apache2 doesn't start quickly enough +# for the test, and so it gets reset errors; repeat until either that +# stops or there is a failure +@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=5, max=10), + retry=tenacity.retry_if_exception_type( + http.client.RemoteDisconnected), + reraise=True) +def _do_request(request, cafile=None): + """Open a webpage via urlopen. + + :param request: A urllib request object. + :type request: object + :returns: HTTPResponse object + :rtype: object + :raises: URLError on protocol errors + """ + return urllib.request.urlopen(request, cafile=cafile) + + +class OpenStackDashboardBase(): + """Mixin for interacting with Horizon.""" + + def get_base_url(self): + """Return the base url for http(s) requests. + + :returns: URL + :rtype: str + """ + vip = (zaza_model.get_application_config(self.application_name) + .get("vip").get("value")) + if vip: + ip = vip + else: + unit = zaza_model.get_unit_from_name( + zaza_model.get_lead_unit_name(self.application_name)) + ip = unit.public_address + + logging.debug("Dashboard ip is:{}".format(ip)) + scheme = 'http' + if self.use_https: + scheme = 'https' + url = '{}://{}'.format(scheme, ip) + return url + + def get_horizon_url(self): + """Return the url for acccessing horizon. + + :returns: Horizon URL + :rtype: str + """ + url = '{}/horizon'.format(self.get_base_url()) + logging.info("Horizon URL is: {}".format(url)) + return url + + @property + def use_https(self): + """Whether dashboard is using https. + + :returns: Whether dashboard is using https + :rtype: boolean + """ + use_https = False + vault_relation = zaza_model.get_relation_id( + self.application, + 'vault', + remote_interface_name='certificates') + if vault_relation: + use_https = True + return use_https + + +class OpenStackDashboardTests(test_utils.OpenStackBaseTest, + OpenStackDashboardBase): + """Encapsulate openstack dashboard charm tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running openstack dashboard charm tests.""" + super(OpenStackDashboardTests, cls).setUpClass() + cls.application = 'openstack-dashboard' + + def test_050_local_settings_permissions_regression_check_lp1755027(self): + """Assert regression check lp1755027. + + Assert the intended file permissions on openstack-dashboard's + configuration file. Regression coverage for + https://bugs.launchpad.net/bugs/1755027. + + Ported from amulet tests. + """ + file_path = '/etc/openstack-dashboard/local_settings.py' + expected_perms = '640' + unit_name = zaza_model.get_lead_unit_name('openstack-dashboard') + + logging.info('Checking {} permissions...'.format(file_path)) + + # NOTE(beisner): This could be a new test helper, but it needs + # to be a clean backport to stable with high prio, so maybe later. + cmd = 'stat -c %a {}'.format(file_path) + output = zaza_model.run_on_unit(unit_name, cmd) + perms = output['Stdout'].strip() + assert perms == expected_perms, \ + ('{} perms of {} not expected ones of {}' + .format(file_path, perms, expected_perms)) + + def test_100_services(self): + """Verify the expected services are running. + + Ported from amulet tests. + """ + logging.info('Checking openstack-dashboard services...') + + unit_name = zaza_model.get_lead_unit_name('openstack-dashboard') + openstack_services = ['apache2'] + services = {} + services[unit_name] = openstack_services + + for unit_name, unit_services in services.items(): + zaza_model.block_until_service_status( + unit_name=unit_name, + services=unit_services, + target_status='running' + ) + + def test_200_haproxy_stats_config(self): + """Verify that the HAProxy stats are properly setup.""" + logging.info('Checking dashboard HAProxy settings...') + unit = zaza_model.get_unit_from_name( + zaza_model.get_lead_unit_name(self.application_name)) + logging.debug("... dashboard_ip is:{}".format(unit.public_address)) + conf = '/etc/haproxy/haproxy.cfg' + port = '8888' + set_alternate = { + 'haproxy-expose-stats': 'True', + } + + request = urllib.request.Request( + 'http://{}:{}'.format(unit.public_address, port)) + + output = str(generic_utils.get_file_contents(unit, conf)) + + for line in output.split('\n'): + if "stats auth" in line: + password = line.split(':')[1] + base64string = base64.b64encode( + bytes('{}:{}'.format('admin', password), 'ascii')) + request.add_header( + "Authorization", "Basic {}".format(base64string.decode('utf-8'))) + + # Expect default config to not be available externally. + expected = 'bind 127.0.0.1:{}'.format(port) + self.assertIn(expected, output) + with self.assertRaises(urllib.error.URLError): + _do_request(request) + + zaza_model.set_application_config(self.application_name, set_alternate) + zaza_model.block_until_all_units_idle(model_name=self.model_name) + + # Once exposed, expect HAProxy stats to be available externally + output = str(generic_utils.get_file_contents(unit, conf)) + expected = 'bind 0.0.0.0:{}'.format(port) + html = _do_request(request).read().decode(encoding='utf-8') + self.assertIn(expected, output) + self.assertIn('Statistics Report for HAProxy', html, + "HAProxy stats check failed") + + def test_302_router_settings(self): + """Verify that the horizon router settings are correct. + + Ported from amulet tests. + """ + # note this test is only valid after trusty-icehouse; however, all of + # the zaza tests are after trusty-icehouse + logging.info('Checking dashboard router settings...') + unit_name = zaza_model.get_lead_unit_name('openstack-dashboard') + conf = ('/usr/share/openstack-dashboard/openstack_dashboard/' + 'enabled/_40_router.py') + + cmd = 'cat {}'.format(conf) + output = zaza_model.run_on_unit(unit_name, cmd) + + expected = { + 'DISABLED': "True", + } + mismatches = self.crude_py_parse(output['Stdout'], expected) + assert not mismatches, ("mismatched keys on {} were:\n{}" + .format(conf, ", ".join(mismatches))) + + def crude_py_parse(self, file_contents, expected): + """Parse a python file looking for key = value assignements.""" + mismatches = [] + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = "Mismatch %s != %s" % (expected[key], value) + mismatches.append(msg) + return mismatches + + def test_400_connection(self): + """Test that dashboard responds to http request. + + Ported from amulet tests. + """ + logging.info('Checking dashboard http response...') + request = urllib.request.Request(self.get_horizon_url()) + try: + logging.info("... trying to fetch the page") + html = _do_request(request, cafile=self.cacert) + logging.info("... fetched page") + except Exception as e: + logging.info("... exception raised was {}".format(str(e))) + raise + return html.read().decode('utf-8') + self.assertIn('OpenStack Dashboard', html, + "Dashboard frontpage check failed") + + def test_401_authenticate(self): + """Validate that authentication succeeds for client log in.""" + logging.info('Checking authentication through dashboard...') + + overcloud_auth = openstack_utils.get_overcloud_auth() + password = overcloud_auth['OS_PASSWORD'], + logging.info("admin password is {}".format(password)) + # try to get the url which will either pass or fail with a 403 + overcloud_auth = openstack_utils.get_overcloud_auth() + domain = 'admin_domain', + username = 'admin', + password = overcloud_auth['OS_PASSWORD'], + _login( + self.get_horizon_url(), + domain, + username, + password, + cafile=self.cacert) + logging.info('OK') + + def test_404_connection(self): + """Verify the apache status module gets disabled when hardening apache. + + Ported from amulet tests. + """ + logging.info('Checking apache mod_status gets disabled.') + logging.debug('Maybe enabling hardening for apache...') + _app_config = zaza_model.get_application_config(self.application_name) + logging.info(_app_config['harden']) + + request = urllib.request.Request(self.get_horizon_url()) + with self.config_change( + {'harden': _app_config['harden'].get('value', '')}, + {'harden': 'apache'}): + try: + _do_request(request, cafile=self.cacert) + except urllib.request.HTTPError as e: + # test failed if it didn't return 404 + msg = "Apache mod_status check failed." + self.assertEqual(e.code, 404, msg) + logging.info('OK') + + def test_900_restart_on_config_change(self): + """Verify that the specified services are restarted on config changed. + + Ported from amulet tests. + """ + logging.info("Testing restart on config changed.") + + # Expected default and alternate values + current_value = zaza_model.get_application_config( + self.application_name)['use-syslog']['value'] + new_value = str(not bool(current_value)).title() + current_value = str(current_value).title() + + # Expected default and alternate values + set_default = {'use-syslog': current_value} + set_alternate = {'use-syslog': new_value} + + # Services which are expected to restart upon config change, + # and corresponding config files affected by the change + services = ['apache2', 'memcached'] + conf_file = '/etc/openstack-dashboard/local_settings.py' + + # Make config change, check for service restarts + logging.info('Setting use-syslog on openstack-dashboard {}' + .format(set_alternate)) + self.restart_on_changed( + conf_file, + set_default, + set_alternate, + None, None, + services) + + def test_910_pause_and_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + + Ported from amulet tests. + """ + with self.pause_resume(['apache2']): + logging.info("Testing pause resume") + + +class OpenStackDashboardPolicydTests(policyd.BasePolicydSpecialization, + OpenStackDashboardBase): + """Test the policyd override using the dashboard.""" + + good = { + "identity/file1.yaml": "{'rule1': '!'}" + } + bad = { + "identity/file2.yaml": "{'rule': '!}" + } + path_infix = "keystone_policy.d" + _rule = {'identity/rule.yaml': yaml.dump({ + 'identity:list_domains': '!', + 'identity:get_domain': '!', + 'identity:update_domain': '!', + 'identity:list_domains_for_user': '!', + })} + + # url associated with rule above that will return HTTP 403 + url = "http://{}/horizon/identity/domains" + + @classmethod + def setUpClass(cls, application_name=None): + """Run class setup for running horizon charm operation tests.""" + super(OpenStackDashboardPolicydTests, cls).setUpClass( + application_name="openstack-dashboard") + cls.application_name = "openstack-dashboard" + cls.application = cls.application_name + + def get_client_and_attempt_operation(self, ip): + """Attempt to list users on the openstack-dashboard service. + + This is slightly complicated in that the client is actually a web-site. + Thus, the test has to login first and then attempt the operation. This + makes the test a little more complicated. + + :param ip: the IP address to get the session against. + :type ip: str + :raises: PolicydOperationFailedException if operation fails. + """ + unit = zaza_model.get_unit_from_name( + zaza_model.get_lead_unit_name(self.application_name)) + logging.info("Dashboard is at {}".format(unit.public_address)) + overcloud_auth = openstack_utils.get_overcloud_auth() + password = overcloud_auth['OS_PASSWORD'], + logging.info("admin password is {}".format(password)) + # try to get the url which will either pass or fail with a 403 + overcloud_auth = openstack_utils.get_overcloud_auth() + domain = 'admin_domain', + username = 'admin', + password = overcloud_auth['OS_PASSWORD'], + client, response = _login( + self.get_horizon_url(), domain, username, password) + # now attempt to get the domains page + _url = self.url.format(unit.public_address) + result = client.get(_url) + if result.status_code == 403: + raise policyd.PolicydOperationFailedException("Not authenticated") + + +class SecurityTests(test_utils.OpenStackBaseTest, + OpenStackDashboardBase): + """Openstack-dashboard security tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running openstack-dashboard SecurityTests.""" + super(SecurityTests, cls).setUpClass() + + def test_security_checklist(self): + """Verify expected state with security checklist.""" + logging.info("Testing security checklist.") + + expected_failures = [ + 'csrf_cookie_set', + 'disable_password_reveal', + 'disallow-iframe-embed', + 'password-validator-is-not-default', + 'securie_proxy_ssl_header_is_set', + 'session_cookie-httponly', + 'session-cookie-store', + ] + expected_passes = [ + 'disable_password_autocomplete', + 'enforce-password-check', + 'validate-file-ownership', + 'validate-file-permissions' + ] + + logging.info('Running `security-checklist` action' + ' on {} leader'.format(self.application_name)) + test_utils.audit_assertions( + zaza_model.run_action_on_leader( + self.application_name, + 'security-checklist', + model_name=self.model_name, + action_params={}), + expected_passes, + expected_failures, + expected_to_pass=False) diff --git a/zaza/openstack/charm_tests/openstack_upgrade/__init__.py b/zaza/openstack/charm_tests/openstack_upgrade/__init__.py new file mode 100644 index 0000000..9a1ca53 --- /dev/null +++ b/zaza/openstack/charm_tests/openstack_upgrade/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Code for testing openstack upgrades.""" diff --git a/zaza/openstack/charm_tests/openstack_upgrade/tests.py b/zaza/openstack/charm_tests/openstack_upgrade/tests.py new file mode 100644 index 0000000..82c7fbb --- /dev/null +++ b/zaza/openstack/charm_tests/openstack_upgrade/tests.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Define class for OpenStack Upgrade.""" + +import logging +import unittest + +from zaza.openstack.utilities import ( + cli as cli_utils, + upgrade_utils as upgrade_utils, + openstack as openstack_utils, + openstack_upgrade as openstack_upgrade, +) +from zaza.openstack.charm_tests.nova.tests import LTSGuestCreateTest + + +class OpenStackUpgradeVMLaunchBase(object): + """A base class to peform a simple validation on the cloud. + + This wraps an OpenStack upgrade with a VM launch before and after the + upgrade. + + This test requires a full OpenStack including at least: keystone, glance, + nova-cloud-controller, nova-compute, neutron-gateway, neutron-api and + neutron-openvswitch. + + This class should be used as a base class to the upgrade 'test'. + """ + + @classmethod + def setUpClass(cls): + """Run setup for OpenStack Upgrades.""" + print("Running OpenStackUpgradeMixin setUpClass") + super().setUpClass() + cls.lts = LTSGuestCreateTest() + cls.lts.setUpClass() + + def test_100_validate_pre_openstack_upgrade_cloud(self): + """Validate pre openstack upgrade.""" + logging.info("Validate pre-openstack-upgrade: Spin up LTS instance") + self.lts.test_launch_small_instance() + + def test_500_validate_openstack_upgraded_cloud(self): + """Validate post openstack upgrade.""" + logging.info("Validate post-openstack-upgrade: Spin up LTS instance") + self.lts.test_launch_small_instance() + + +class WaitForMySQL(unittest.TestCase): + """Helper test to wait on mysql-innodb-cluster to be fully ready. + + In practice this means that there is at least on R/W unit available. + Sometimes, after restarting units in the mysql-innodb-cluster, all the + units are R/O until the cluster picks the R/W unit. + """ + + @classmethod + def setUpClass(cls): + """Set up class.""" + print("Running OpenstackUpgradeTests setUpClass") + super().setUpClass() + cli_utils.setup_logging() + + def test_100_wait_for_happy_mysql_innodb_cluster(self): + """Wait for mysql cluster to have at least one R/W node.""" + logging.info("Starting wait for an R/W unit.") + openstack_upgrade.block_until_mysql_innodb_cluster_has_rw() + logging.info("Done .. all seems well.") + + +class OpenStackUpgradeTestsFocalUssuri(OpenStackUpgradeVMLaunchBase): + """Upgrade OpenStack from distro -> cloud:focal-victoria.""" + + @classmethod + def setUpClass(cls): + """Run setup for OpenStack Upgrades.""" + print("Running OpenstackUpgradeTests setUpClass") + super().setUpClass() + cli_utils.setup_logging() + + def test_200_run_openstack_upgrade(self): + """Run openstack upgrade, but work out what to do.""" + openstack_upgrade.run_upgrade_tests("cloud:focal-victoria") + + +class OpenStackUpgradeTests(OpenStackUpgradeVMLaunchBase): + """A Principal Class to encapsulate OpenStack Upgrade Tests. + + A generic Test class that can discover which Ubuntu version and OpenStack + version to upgrade from. + + TODO: Not used at present. Use the declarative tests directly that choose + the version to upgrade to. The functions that this class depends on need a + bit more work regarding how the determine which version to go to. + """ + + @classmethod + def setUpClass(cls): + """Run setup for OpenStack Upgrades.""" + print("Running OpenstackUpgradeTests setUpClass") + super().setUpClass() + cli_utils.setup_logging() + + def test_200_run_openstack_upgrade(self): + """Run openstack upgrade, but work out what to do. + + TODO: This is really inefficient at the moment, and doesn't (yet) + determine which ubuntu version to work from. Don't use until we can + make it better. + """ + # TODO: work out the most recent Ubuntu version; we assume this is the + # version that OpenStack is running on. + ubuntu_version = "focal" + logging.info("Getting all principle applications ...") + principle_services = upgrade_utils.get_all_principal_applications() + logging.info( + "Getting OpenStack vesions from principal applications ...") + current_versions = openstack_utils.get_current_os_versions( + principle_services) + logging.info("current versions: %s" % current_versions) + # Find the lowest value openstack release across all services and make + # sure all servcies are upgraded to one release higher than the lowest + from_version = upgrade_utils.get_lowest_openstack_version( + current_versions) + logging.info("from version: %s" % from_version) + to_version = upgrade_utils.determine_next_openstack_release( + from_version)[1] + logging.info("to version: %s" % to_version) + # TODO: need to determine the ubuntu base verion that is being upgraded + target_source = upgrade_utils.determine_new_source( + ubuntu_version, from_version, to_version, single_increment=True) + logging.info("target source: %s" % target_source) + assert target_source is not None + openstack_upgrade.run_upgrade_tests(target_source) diff --git a/zaza/openstack/charm_tests/ovn/__init__.py b/zaza/openstack/charm_tests/ovn/__init__.py new file mode 100644 index 0000000..bd5900c --- /dev/null +++ b/zaza/openstack/charm_tests/ovn/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing OVN.""" diff --git a/zaza/openstack/charm_tests/ovn/setup.py b/zaza/openstack/charm_tests/ovn/setup.py new file mode 100644 index 0000000..122ce70 --- /dev/null +++ b/zaza/openstack/charm_tests/ovn/setup.py @@ -0,0 +1,154 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Code for configuring OVN tests.""" + +import logging + +import zaza + +import zaza.openstack.charm_tests.test_utils as test_utils + + +class _OVNSetupHelper(test_utils.BaseCharmTest): + """Helper class to get at the common `config_change` helper.""" + + @staticmethod + def _get_instance_mtu_from_global_physnet_mtu(): + """Calculate instance mtu from Neutron API global-physnet-mtu. + + :returns: Value for instance mtu after migration. + :rtype: int + """ + n_api_config = zaza.model.get_application_config('neutron-api') + + # NOTE: we would have to adjust this calculation if we use IPv6 tunnel + # endpoints + GENEVE_ENCAP_OVERHEAD = 38 + IP4_HEADER_SIZE = 20 + return int(n_api_config['global-physnet-mtu']['value']) - ( + GENEVE_ENCAP_OVERHEAD + IP4_HEADER_SIZE) + + def _configure_apps(self, apps, cfg, + first_match_raise_if_none_found=False): + """Conditionally configure a set of applications. + + :param apps: Applications. + :type apps: Iterator[str] + :param cfg: Configuration to apply. + :type cfg: Dict[str,any] + :param first_match_raise_if_none_found: When set the method will + configure the first application + it finds in the model and raise + an exception if none are found. + :type first_match_raise_if_none_found: bool + :raises: RuntimeError + """ + for app in apps: + try: + zaza.model.get_application(app) + for k, v in cfg.items(): + logging.info('Setting `{}` to "{}" on "{}"...' + .format(k, v, app)) + with self.config_change(cfg, cfg, app): + # The intent here is to change the config and not + # restore it. We accomplish that by passing in the same + # value for default and alternate. + # + # The reason for using the `config_change` helper for + # this is that it already deals with all the + # permutations of config already being set etc and does + # not get into trouble if the test bundle already has + # the values we try to set. + if first_match_raise_if_none_found: + break + else: + continue + else: + if first_match_raise_if_none_found: + raise RuntimeError( + 'None of the expected apps ({}) are present in ' + 'the model.' + .format(apps) + ) + except KeyError: + pass + + def configure_ngw_novs(self): + """Configure n-ovs and n-gw units.""" + cfg = { + # To be able to have instances successfully survive the migration + # without communication issues we need to lower the MTU announced + # to instances prior to migration. + # + # NOTE: In a real world scenario the end user would configure the + # MTU at least 24 hrs prior to doing the migration to allow + # instances to reconfigure as they renew the DHCP lease. + # + # NOTE: For classic n-gw topologies the `instance-mtu` config + # is a NOOP on neutron-openvswitch units, but that is ok. + 'instance-mtu': self._get_instance_mtu_from_global_physnet_mtu() + } + apps = ('neutron-gateway', 'neutron-openvswitch') + self._configure_apps(apps, cfg) + cfg_ovs = { + # To be able to successfully clean up after the Neutron agents we + # need to use the 'openvswitch' `firewall-driver`. + 'firewall-driver': 'openvswitch', + } + self._configure_apps(('neutron-openvswitch',), cfg_ovs) + + def configure_ovn_mappings(self): + """Copy mappings from n-gw or n-ovs application.""" + dst_apps = ('ovn-dedicated-chassis', 'ovn-chassis') + src_apps = ('neutron-gateway', 'neutron-openvswitch') + ovn_cfg = {} + for app in src_apps: + try: + app_cfg = zaza.model.get_application_config(app) + ovn_cfg['bridge-interface-mappings'] = app_cfg[ + 'data-port']['value'] + ovn_cfg['ovn-bridge-mappings'] = app_cfg[ + 'bridge-mappings']['value'] + # Use values from neutron-gateway when present, otherwise use + # values from neutron-openvswitch + break + except KeyError: + pass + else: + raise RuntimeError( + 'None of the expected apps ({}) are present in the model.' + .format(src_apps) + ) + + self._configure_apps( + dst_apps, ovn_cfg, first_match_raise_if_none_found=True) + + +def pre_migration_configuration(): + """Perform pre-migration configuration steps. + + NOTE: Doing the configuration post-deploy and after doing initial network + configuration is an important part of the test as we need to prove that our + end users would be successful in doing this in the wild. + """ + # we use a helper class to leverage common setup code and the + # `config_change` helper + helper = _OVNSetupHelper() + helper.setUpClass() + # Configure `firewall-driver` and `instance-mtu` on n-gw and n-ovs units. + helper.configure_ngw_novs() + # Copy mappings from n-gw or n-ovs application to ovn-dedicated-chassis or + # ovn-chassis. + helper.configure_ovn_mappings() diff --git a/zaza/openstack/charm_tests/ovn/tests.py b/zaza/openstack/charm_tests/ovn/tests.py new file mode 100644 index 0000000..2478d66 --- /dev/null +++ b/zaza/openstack/charm_tests/ovn/tests.py @@ -0,0 +1,557 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate OVN testing.""" + +import logging + +import juju + +import tenacity + +import zaza + +import zaza.model +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.generic as generic_utils +import zaza.openstack.utilities.openstack as openstack_utils + + +class BaseCharmOperationTest(test_utils.BaseCharmTest): + """Base OVN Charm operation tests.""" + + # override if not possible to determine release pair from charm under test + release_application = None + + @classmethod + def setUpClass(cls): + """Run class setup for OVN charm operation tests.""" + super(BaseCharmOperationTest, cls).setUpClass() + cls.services = ['NotImplemented'] # This must be overridden + cls.nrpe_checks = ['NotImplemented'] # This must be overridden + cls.current_release = openstack_utils.get_os_release( + openstack_utils.get_current_os_release_pair( + cls.release_application or cls.application_name)) + + @tenacity.retry( + retry=tenacity.retry_if_result(lambda ret: ret is not None), + # sleep for 2mins to allow 1min cron job to run... + wait=tenacity.wait_fixed(120), + stop=tenacity.stop_after_attempt(2)) + def _retry_check_commands_on_units(self, cmds, units): + return generic_utils.check_commands_on_units(cmds, units) + + def test_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped, then resume and check + they are started. + """ + with self.pause_resume(self.services): + logging.info('Testing pause resume (services="{}")' + .format(self.services)) + + def test_nrpe_configured(self): + """Confirm that the NRPE service check files are created.""" + units = zaza.model.get_units(self.application_name) + cmds = [] + for check_name in self.nrpe_checks: + cmds.append( + 'egrep -oh /usr/local.* /etc/nagios/nrpe.d/' + 'check_{}.cfg'.format(check_name) + ) + ret = self._retry_check_commands_on_units(cmds, units) + if ret: + logging.info(ret) + self.assertIsNone(ret, msg=ret) + + +class CentralCharmOperationTest(BaseCharmOperationTest): + """OVN Central Charm operation tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for OVN Central charm operation tests.""" + super(CentralCharmOperationTest, cls).setUpClass() + cls.services = [ + 'ovn-northd', + 'ovsdb-server', + ] + source = zaza.model.get_application_config( + cls.application_name)['source']['value'] + logging.info(source) + if 'train' in source: + cls.nrpe_checks = [ + 'ovn-northd', + 'ovn-nb-ovsdb', + 'ovn-sb-ovsdb', + ] + else: + # Ussuri or later (distro or cloudarchive) + cls.nrpe_checks = [ + 'ovn-northd', + 'ovn-ovsdb-server-sb', + 'ovn-ovsdb-server-nb', + ] + + +class ChassisCharmOperationTest(BaseCharmOperationTest): + """OVN Chassis Charm operation tests.""" + + release_application = 'ovn-central' + + @classmethod + def setUpClass(cls): + """Run class setup for OVN Chassis charm operation tests.""" + super(ChassisCharmOperationTest, cls).setUpClass() + cls.services = [ + 'ovn-controller', + ] + if cls.application_name == 'ovn-chassis': + principal_app_name = 'magpie' + else: + principal_app_name = cls.application_name + source = zaza.model.get_application_config( + principal_app_name)['source']['value'] + logging.info(source) + if 'train' in source: + cls.nrpe_checks = [ + 'ovn-host', + 'ovs-vswitchd', + 'ovsdb-server', + ] + else: + # Ussuri or later (distro or cloudarchive) + cls.nrpe_checks = [ + 'ovn-controller', + 'ovsdb-server', + 'ovs-vswitchd', + ] + + def test_prefer_chassis_as_gw(self): + """Confirm effect of prefer-chassis-as-gw configuration option.""" + expected_key = 'external-ids:ovn-cms-options' + expected_value = 'enable-chassis-as-gw' + with self.config_change( + {}, {'prefer-chassis-as-gw': True}, + reset_to_charm_default=True): + for unit in zaza.model.get_units(self.application_name): + self.assertEqual( + zaza.model.run_on_unit( + unit.entity_id, + 'ovs-vsctl get open-vswitch . {}'.format(expected_key) + )['Stdout'].rstrip(), + expected_value) + logging.info( + '{}: "{}" set to "{}"' + .format(unit.entity_id, expected_key, expected_value)) + logging.info('Config restored, checking things went back to normal') + for unit in zaza.model.get_units(self.application_name): + self.assertEqual( + zaza.model.run_on_unit( + unit.entity_id, + 'ovs-vsctl get open-vswitch . ' + 'external-ids:ovn-cms-options')['Code'], + '1') + logging.info( + '{}: "{}" no longer present' + .format(unit.entity_id, expected_key)) + + +class OVSOVNMigrationTest(test_utils.BaseCharmTest): + """OVS to OVN migration tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for OVN migration tests.""" + super(OVSOVNMigrationTest, cls).setUpClass() + cls.current_release = openstack_utils.get_os_release( + openstack_utils.get_current_os_release_pair()) + + def setUp(self): + """Perform migration steps prior to validation.""" + super(OVSOVNMigrationTest, self).setUp() + # These steps are here due to them having to be executed once and in a + # specific order prior to running any tests. The steps should still + # be idempotent if at all possible as a courtesy to anyone iterating + # on the test code. + try: + if self.one_time_init_done: + logging.debug('Skipping migration steps as they have already ' + 'run.') + return + except AttributeError: + logging.info('Performing migration steps.') + + # as we progress through the steps our target deploy status changes + # store it in the class instance so the individual methods can + # update when appropriate. + self.target_deploy_status = self.test_config.get( + 'target_deploy_status', {}) + + # Stop Neutron agents on hypervisors + self._pause_units('neutron-openvswitch') + try: + self._pause_units('neutron-gateway') + except KeyError: + logging.info( + 'No neutron-gateway in deployment, skip pausing it.') + + # Add the neutron-api-plugin-ovn subordinate which will make the + # `neutron-api-plugin-ovn` unit appear in the deployment. + # + # NOTE: The OVN drivers will not be activated until we change the + # value for the `manage-neutron-plugin-legacy-mode` config. + self._add_neutron_api_plugin_ovn_subordinate_relation() + + # Adjust MTU on overlay networks + # + # Prior to this the end user will already have lowered the MTU on their + # running instances through the use of the `instance-mtu` configuration + # option and manual reconfiguration of instances that do not use DHCP. + # + # We update the value for the MTU on the overlay networks at this point + # in time because: + # + # - Agents are paused and will not actually reconfigure the networks. + # + # - Making changes to non-Geneve networks are prohibited as soon as the + # OVN drivers are activated. + # + # - Get the correct MTU value into the OVN database on first sync. + # + # - This will be particularly important for any instances using + # stateless IPv6 autoconfiguration (SLAAC) as there is currently + # no config knob to feed MTU information into the legacy ML2+OVS + # `radvd` configuration or the native OVN RA. + # + # - Said instances will reconfigure their IPv6 MTU as soon as they + # receive an RA with correct MTU when OVN takes over control. + self._run_migrate_mtu_action() + + # Flip `manage-neutron-plugin-legacy-mode` to enable it + # + # NOTE(fnordahl): until we sync/repair the OVN DB this will make the + # `neutron-server` log errors. However we need the neutron unit to be + # unpaused while doing this to have the configuration rendered. The + # configuration is consumed by the `neutron-ovn-db-sync` tool. + self._configure_neutron_api() + + # Stop the Neutron server prior to OVN DB sync/repair + self._pause_units('neutron-api') + + # Sync the OVN DB + self._run_migrate_ovn_db_action() + # Perform the optional morphing of Neutron DB action + self._run_offline_neutron_morph_db_action() + self._resume_units('neutron-api') + + # Run `cleanup` action on neutron-openvswitch units/hypervisors + self._run_cleanup_action('neutron-openvswitch') + # Run `cleanup` action on neutron-gateway units when present + try: + self._run_cleanup_action('neutron-gateway') + except KeyError: + logging.info( + 'No neutron-gateway in deployment, skip cleanup of it.') + + # Start the OVN controller on hypervisors + # + # NOTE(fnordahl): it is very important to have run cleanup prior to + # starting these, if you don't do that it is almost guaranteed that + # you will program the network to a state of infinite loop. + self._resume_units('ovn-chassis') + + try: + self._resume_units('ovn-dedicated-chassis') + except KeyError: + logging.info( + 'No ovn-dedicated-chassis in deployment, skip resume.') + + # And we should be off to the races + + self.one_time_init_done = True + + def _add_neutron_api_plugin_ovn_subordinate_relation(self): + """Add relation between neutron-api and neutron-api-plugin-ovn.""" + try: + logging.info('Adding relation neutron-api-plugin-ovn ' + '-> neutron-api') + zaza.model.add_relation( + 'neutron-api-plugin-ovn', 'neutron-plugin', + 'neutron-api:neutron-plugin-api-subordinate') + zaza.model.wait_for_agent_status() + + # NOTE(lourot): usually in this scenario, the test bundle has been + # originally deployed with a non-related neutron-api-plugin-ovn + # subordinate application, and thus Zaza has been taught to expect + # initially no unit from this application. We are now relating it + # to a principal neutron-api application with one unit. Thus we now + # need to make sure we wait for one unit from this subordinate + # before proceeding: + target_deploy_status = self.test_config.get('target_deploy_status', + {}) + try: + target_deploy_status['neutron-api-plugin-ovn'][ + 'num-expected-units'] = 1 + except KeyError: + # num-expected-units wasn't set to 0, no expectation to be + # fixed, let's move on. + pass + + zaza.model.wait_for_application_states( + states=target_deploy_status) + + except juju.errors.JujuAPIError: + # we were not able to add the relation, let's make sure it's + # because it's already there + assert (zaza.model.get_relation_id( + 'neutron-api-plugin-ovn', 'neutron-api', + remote_interface_name='neutron-plugin-api-subordinate') + is not None), 'Unable to add relation required for test' + logging.info('--> On the other hand, did not need to add the ' + 'relation as it was already there.') + + def _configure_neutron_api(self): + """Set configuration option `manage-neutron-plugin-legacy-mode`.""" + logging.info('Configuring `manage-neutron-plugin-legacy-mode` for ' + 'neutron-api...') + n_api_config = { + 'manage-neutron-plugin-legacy-mode': False, + } + with self.config_change( + n_api_config, n_api_config, 'neutron-api'): + logging.info('done') + + def _run_offline_neutron_morph_db_action(self): + """Run offline-neutron-morph-db action.""" + logging.info('Running the optional `offline-neutron-morph-db` action ' + 'on neutron-api-plugin-ovn/leader') + generic_utils.assertActionRanOK( + zaza.model.run_action_on_leader( + 'neutron-api-plugin-ovn', + 'offline-neutron-morph-db', + action_params={ + 'i-really-mean-it': True}, + raise_on_failure=True, + ) + ) + + def _run_migrate_ovn_db_action(self): + """Run migrate-ovn-db action.""" + logging.info('Running `migrate-ovn-db` action on ' + 'neutron-api-plugin-ovn/leader') + generic_utils.assertActionRanOK( + zaza.model.run_action_on_leader( + 'neutron-api-plugin-ovn', + 'migrate-ovn-db', + action_params={ + 'i-really-mean-it': True}, + raise_on_failure=True, + ) + ) + + # Charm readiness is no guarantee for API being ready to serve requests. + # https://bugs.launchpad.net/charm-neutron-api/+bug/1854518 + @tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60), + reraise=True, stop=tenacity.stop_after_attempt(3)) + def _run_migrate_mtu_action(self): + """Run migrate-mtu action with retry. + + The action is idempotent. + + Due to LP: #1854518 and the point in time of the test life cycle we run + this action the probability for the Neutron API not being available + for the script to do its job is high, thus we retry. + """ + logging.info('Running `migrate-mtu` action on ' + 'neutron-api-plugin-ovn/leader') + generic_utils.assertActionRanOK( + zaza.model.run_action_on_leader( + 'neutron-api-plugin-ovn', + 'migrate-mtu', + action_params={ + 'i-really-mean-it': True}, + raise_on_failure=True, + ) + ) + + def _pause_units(self, application): + """Pause units of application. + + :param application: Name of application + :type application: str + """ + logging.info('Pausing {} units'.format(application)) + zaza.model.run_action_on_units( + [unit.entity_id + for unit in zaza.model.get_units(application)], + 'pause', + raise_on_failure=True, + ) + self.target_deploy_status.update( + { + application: { + 'workload-status': 'maintenance', + 'workload-status-message': 'Paused', + }, + }, + ) + + def _run_cleanup_action(self, application): + """Run cleanup action on application units. + + :param application: Name of application + :type application: str + """ + logging.info('Running `cleanup` action on {} units.' + .format(application)) + zaza.model.run_action_on_units( + [unit.entity_id + for unit in zaza.model.get_units(application)], + 'cleanup', + action_params={ + 'i-really-mean-it': True}, + raise_on_failure=True, + ) + + def _resume_units(self, application): + """Resume units of application. + + :param application: Name of application + :type application: str + """ + logging.info('Resuming {} units'.format(application)) + zaza.model.run_action_on_units( + [unit.entity_id + for unit in zaza.model.get_units(application)], + 'resume', + raise_on_failure=True, + ) + self.target_deploy_status.pop(application) + + def test_ovs_ovn_migration(self): + """Test migration of existing Neutron ML2+OVS deployment to OVN. + + The test should be run after deployment and validation of a legacy + deployment combined with subsequent run of a network connectivity test + on instances created prior to the migration. + """ + # The setUp method of this test class will perform the migration steps. + # The tests.yaml is programmed to do further validation after the + # migration. + + # Reset the n-gw and n-ovs instance-mtu configuration option so it does + # not influence how further tests are executed. + reset_config_keys = ['instance-mtu'] + for app in ('neutron-gateway', 'neutron-openvswitch'): + try: + zaza.model.reset_application_config(app, reset_config_keys) + logging.info('Reset configuration to default on "{}" for "{}"' + .format(app, reset_config_keys)) + except KeyError: + pass + zaza.model.wait_for_agent_status() + zaza.model.wait_for_application_states( + states=self.target_deploy_status) + # Workaround for our old friend LP: #1852221 which hit us again on + # Groovy. We make the os_release check explicit so that we can + # re-evaluate the need for the workaround at the next release. + if self.current_release == openstack_utils.get_os_release( + 'groovy_victoria'): + try: + for application in ('ovn-chassis', 'ovn-dedicated-chassis'): + for unit in zaza.model.get_units(application): + zaza.model.run_on_unit( + unit.entity_id, + 'systemctl restart ovs-vswitchd') + except KeyError: + # One of the applications is not in the model, which is fine + pass + + +class OVNChassisDeferredRestartTest(test_utils.BaseDeferredRestartTest): + """Deferred restart tests.""" + + @classmethod + def setUpClass(cls): + """Run setup for deferred restart tests.""" + super().setUpClass(application_name='ovn-chassis') + + def run_tests(self): + """Run deferred restart tests.""" + # Trigger a config change which triggers a deferred hook. + self.run_charm_change_hook_test('configure_ovs') + + # Trigger a package change which requires a restart + self.run_package_change_test( + 'openvswitch-switch', + 'openvswitch-switch') + + def get_new_config(self): + """Return the config key and new value to trigger a hook execution. + + :returns: Config key and new value + :rtype: (str, bool) + """ + app_config = zaza.model.get_application_config(self.application_name) + return 'enable-sriov', str(not app_config['enable-sriov']['value']) + + +class OVNDedicatedChassisDeferredRestartTest( + test_utils.BaseDeferredRestartTest): + """Deferred restart tests.""" + + @classmethod + def setUpClass(cls): + """Run setup for deferred restart tests.""" + super().setUpClass(application_name='ovn-dedicated-chassis') + + def run_tests(self): + """Run deferred restart tests.""" + # Trigger a config change which triggers a deferred hook. + self.run_charm_change_hook_test('configure_ovs') + + # Trigger a package change which requires a restart + self.run_package_change_test( + 'openvswitch-switch', + 'openvswitch-switch') + + def get_new_config(self): + """Return the config key and new value to trigger a hook execution. + + :returns: Config key and new value + :rtype: (str, bool) + """ + app_config = zaza.model.get_application_config(self.application_name) + new_value = str(not app_config['disable-mlockall'].get('value', False)) + return 'disable-mlockall', new_value + + +class OVNCentralDeferredRestartTest( + test_utils.BaseDeferredRestartTest): + """Deferred restart tests for OVN Central.""" + + @classmethod + def setUpClass(cls): + """Run setup for deferred restart tests.""" + super().setUpClass(application_name='ovn-central') + + def run_tests(self): + """Run deferred restart tests.""" + # Charm does not defer hooks so that test is not included. + # Trigger a package change which requires a restart + self.run_package_change_test( + 'ovn-central', + 'ovn-central') diff --git a/zaza/openstack/charm_tests/pacemaker_remote/tests.py b/zaza/openstack/charm_tests/pacemaker_remote/tests.py index f0d0b4d..328d5cf 100644 --- a/zaza/openstack/charm_tests/pacemaker_remote/tests.py +++ b/zaza/openstack/charm_tests/pacemaker_remote/tests.py @@ -26,5 +26,8 @@ class PacemakerRemoteTest(unittest.TestCase): def test_check_nodes_online(self): """Test that all nodes are online.""" + zaza.openstack.configure.hacluster.remove_node( + 'api', + 'node1') self.assertTrue( zaza.openstack.configure.hacluster.check_all_nodes_online('api')) diff --git a/zaza/openstack/charm_tests/policyd/__init__.py b/zaza/openstack/charm_tests/policyd/__init__.py new file mode 100644 index 0000000..e8c1746 --- /dev/null +++ b/zaza/openstack/charm_tests/policyd/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Policyd. + +Collection of code for setting up and testing policyd overrides across a +collection of charms. +""" diff --git a/zaza/openstack/charm_tests/policyd/tests.py b/zaza/openstack/charm_tests/policyd/tests.py new file mode 100644 index 0000000..396f46e --- /dev/null +++ b/zaza/openstack/charm_tests/policyd/tests.py @@ -0,0 +1,715 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulate policyd testing. + +The Policyd Tests test the following: + +- Two general tests in the PolicydTest class that check that a policy zip can + drop policy files in the correct service policy.d directory. One test tests + that a valid yaml file is dropped; the 2nd that an invalid one is not dropped + and the workload info status line shows that it is broken. +- A custom policyd test that is per charm and tests that a policy zip file + attached does actually disable something in the associated service (i.e. + verify that the charm has implemented policy overrides and ensured that the + service actually picks them up). + +If a charm doesn't require a specific test, then the GenericPolicydTest class +can be used that just includes the two generic tests. The config in the +tests.yaml would stil be required. See the PolicydTest class docstring for +further details. +""" + +import logging +import os +import shutil +import tempfile +import tenacity +import unittest +import zipfile + +from octaviaclient.api.v2 import octavia as octaviaclient +import cinderclient.exceptions +import heatclient.exc +import glanceclient.common.exceptions +import keystoneauth1 + +import zaza.model as zaza_model + +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.openstack as openstack_utils +import zaza.openstack.charm_tests.keystone as ch_keystone +import zaza.openstack.utilities.exceptions as zaza_exceptions +import zaza.openstack.charm_tests.octavia.tests as octavia_tests + +from zaza.openstack.utilities import ObjectRetrierWraps + + +class PolicydTest(object): + """Charm operation tests. + + The policyd test needs some config from the tests.yaml in order to work + properly. A top level key of "tests_options". Under that key is + 'policyd', and then the k:v of 'service': . e.g. for keystone + + tests_options: + policyd: + service: keystone + """ + + good = { + "file1.yaml": "{'rule1': '!'}" + } + bad = { + "file2.yaml": "{'rule': '!}" + } + path_infix = "" + + @classmethod + def setUpClass(cls, application_name=None): + """Run class setup for running Policyd charm operation tests.""" + super(PolicydTest, cls).setUpClass(application_name) + cls._tmp_dir = tempfile.mkdtemp() + cls._service_name = \ + cls.test_config['tests_options']['policyd']['service'] + + @classmethod + def tearDownClass(cls): + """Run class tearDown for running Policyd charm operation tests.""" + super(PolicydTest, cls).tearDownClass() + try: + shutil.rmtree(cls._tmp_dir, ignore_errors=True) + except Exception as e: + logging.error("Removing the policyd tempdir/files failed: {}" + .format(str(e))) + + def _set_config(self, state): + s = "True" if state else "False" + config = {"use-policyd-override": s} + logging.info("Setting config to {}".format(config)) + zaza_model.set_application_config(self.application_name, config) + zaza_model.wait_for_agent_status() + + def _make_zip_file_from(self, name, files): + """Make a zip file from a dictionary of filename: string. + + :param name: the name of the zip file + :type name: PathLike + :param files: a dict of name: string to construct the files from. + :type files: Dict[str, str] + :returns: temp file that is the zip file. + :rtype: PathLike + """ + path = os.path.join(self._tmp_dir, name) + with zipfile.ZipFile(path, "w") as zfp: + for name, contents in files.items(): + zfp.writestr(name, contents) + return path + + def _set_policy_with(self, rules, filename='rules.zip'): + rules_zip_path = self._make_zip_file_from(filename, rules) + zaza_model.attach_resource(self.application_name, + 'policyd-override', + rules_zip_path) + self._set_config(True) + zaza_model.block_until_wl_status_info_starts_with( + self.application_name, "PO:", negate_match=False) + + def test_001_policyd_good_yaml(self): + """Test that the policyd with a good zipped yaml file.""" + good = self.good + good_zip_path = self._make_zip_file_from('good.zip', good) + logging.info("Attaching good zip file as a resource.") + zaza_model.attach_resource(self.application_name, + 'policyd-override', + good_zip_path) + zaza_model.block_until_all_units_idle() + logging.debug("Now setting config to true") + self._set_config(True) + # check that the file gets to the right location + if self.path_infix: + path = os.path.join( + "/etc", self._service_name, "policy.d", self.path_infix, + 'file1.yaml') + else: + path = os.path.join( + "/etc", self._service_name, "policy.d", 'file1.yaml') + logging.info("Now checking for file contents: {}".format(path)) + zaza_model.block_until_file_has_contents(self.application_name, + path, + "rule1: '!'") + # ensure that the workload status info line starts with PO: + logging.info("Checking for workload status line starts with PO:") + zaza_model.block_until_wl_status_info_starts_with( + self.application_name, "PO:") + logging.debug("App status is valid") + + # disable the policy override + logging.info("Disabling policy override by setting config to false") + self._set_config(False) + # check that the status no longer has "PO:" on it. + # we have to do it twice due to async races and that some info lines + # erase the PO: bit prior to actuall getting back to idle. The double + # check verifies that the charms have started, the idle waits until it + # is finished, and then the final check really makes sure they got + # switched off. + zaza_model.block_until_wl_status_info_starts_with( + self.application_name, "PO:", negate_match=True) + zaza_model.block_until_all_units_idle() + zaza_model.block_until_wl_status_info_starts_with( + self.application_name, "PO:", negate_match=True) + + # verify that the file no longer exists + logging.info("Checking that {} has been removed".format(path)) + zaza_model.block_until_file_missing(self.application_name, path) + + logging.info("OK") + + def test_002_policyd_bad_yaml(self): + """Test bad yaml file in the zip file is handled.""" + bad = self.bad + bad_zip_path = self._make_zip_file_from('bad.zip', bad) + logging.info("Attaching bad zip file as a resource") + zaza_model.attach_resource(self.application_name, + 'policyd-override', + bad_zip_path) + zaza_model.block_until_all_units_idle() + logging.debug("Now setting config to true") + self._set_config(True) + # ensure that the workload status info line starts with PO (broken): + # to show that it didn't work + logging.info( + "Checking for workload status line starts with PO (broken):") + zaza_model.block_until_wl_status_info_starts_with( + self.application_name, "PO (broken):") + logging.debug("App status is valid for broken yaml file") + zaza_model.block_until_all_units_idle() + # now verify that no file got landed on the machine + if self.path_infix: + path = os.path.join( + "/etc", self._service_name, "policy.d", self.path_infix, + 'file2.yaml') + else: + path = os.path.join( + "/etc", self._service_name, "policy.d", 'file2.yaml') + logging.info("Now checking that file {} is not present.".format(path)) + zaza_model.block_until_file_missing(self.application_name, path) + self._set_config(False) + zaza_model.block_until_all_units_idle() + logging.info("OK") + + +class GenericPolicydTest(PolicydTest, test_utils.OpenStackBaseTest): + """Generic policyd test for any charm without a specific test.""" + + @classmethod + def setUpClass(cls, application_name=None): + """Run class setup for running KeystonePolicydTest tests.""" + super(GenericPolicydTest, cls).setUpClass(application_name) + if (openstack_utils.get_os_release() < + openstack_utils.get_os_release('xenial_queens')): + raise unittest.SkipTest( + "zaza.openstack.charm_tests.policyd.tests.GenericPolicydTest " + "not valid before xenial_queens") + + +class PolicydOperationFailedException(Exception): + """This is raised by the get_client_and_attempt_operation() method. + + This is used to signal that the operation in the + get_client_and_attempt_operation() method in the BaseSpecialization class + has failed. + """ + + pass + + +class BasePolicydSpecialization(PolicydTest, + ch_keystone.BaseKeystoneTest, + test_utils.OpenStackBaseTest): + """Base test for specialising Policyd override tests. + + This class is for specialization of the test to verify that a yaml file + placed in the policy.d director is observed. This is done by first calling + the get_client_and_attempt_operation() method and ensuring that it works. + This method should attempt an operation on the service that can be blocked + by the policy override in the `_rule` class variable. The method should + pass cleanly without the override in place. + + The test_003_test_override_is_observed will then apply the override and + then call get_client_and_attempt_operation() again, and this time it should + detect the failure and raise the PolicydOperationFailedException() + exception. This will be detected as the override working and thus the test + will pass. + + The test will fail if the first call fails for any reason, or if the 2nd + call doesn't raise PolicydOperationFailedException or raises any other + exception. + + To use this class, follow the keystone example: + + class KeystonePolicydTest(BasePolicydSpecialization): + + _rule = {'rule.yaml': "{'identity:list_credentials': '!'}"} + + def get_client_and_attempt_operation(self, keystone_session): + ... etc. + """ + + # this needs to be defined as the rule that gets placed into a yaml policy + # override. It is a string of the form: 'some-rule: "!"' + # i.e. disable some policy and then try and test it. + _rule = None + + # Optional: the name to log at the beginning of the test + _test_name = None + + @classmethod + def setUpClass(cls, application_name=None): + """Run class setup for running KeystonePolicydTest tests.""" + super(BasePolicydSpecialization, cls).setUpClass(application_name) + if (openstack_utils.get_os_release() < + openstack_utils.get_os_release('xenial_queens')): + raise unittest.SkipTest( + "zaza.openstack.charm_tests.policyd.tests.* " + "not valid before xenial_queens") + if cls._rule is None: + raise unittest.SkipTest( + "zaza.openstack.charm_tests.policyd.tests.* " + "not valid if {}.rule is not configured" + .format(cls.__name__)) + + def setup_for_attempt_operation(self, ip): + """Set-up for the policy override if needed. + + This method allows the test being performed in + get_client_and_attempt_operation() to have some setup done before the + test is performed. This is because the method + get_client_and_attempt_operation() is run twice; once to succeed and + once to fail. + + :param ip: the ip of for keystone. + :type ip: str + """ + pass + + def cleanup_for_attempt_operation(self, ip): + """Clean-up after a successful (or not) policy override operation. + + :param ip: the ip of for keystone. + :type ip: str + """ + pass + + def get_client_and_attempt_operation(self, keystone_session): + """Override this method to perform the operation. + + This operation should pass normally for the demo_user, and fail when + the rule has been overriden (see the `rule` class variable. + + :param keystone_session: the keystone session to use to obtain the + client necessary for the test. + :type keystone_session: keystoneauth1.session.Session + :raises: PolicydOperationFailedException if operation fails. + """ + raise NotImplementedError("This method must be overridden") + + def _get_keystone_session(self, ip, openrc, scope='DOMAIN'): + """Return the keystone session for the IP address passed. + + :param ip: the IP address to get the session against. + :type ip: str + :param openrc: the params to authenticate with. + :type openrc: Dict[str, str] + :param scope: the scope of the token + :type scope: str + :returns: a keystone session to the IP address + :rtype: keystoneauth1.session.Session + """ + logging.info('Authentication for {} on keystone IP {}' + .format(openrc['OS_USERNAME'], ip)) + if self.tls_rid: + openrc['OS_CACERT'] = openstack_utils.get_cacert() + openrc['OS_AUTH_URL'] = ( + openrc['OS_AUTH_URL'].replace('http', 'https')) + logging.info('keystone IP {}'.format(ip)) + keystone_session = openstack_utils.get_keystone_session( + openrc, scope=scope) + return keystone_session + + def get_keystone_session_demo_user(self, ip, scope='PROJECT'): + """Return the keystone session for demo user. + + :param ip: the IP address to get the session against. + :type ip: str + :param scope: the scope of the token + :type scope: str + :returns: a keystone session to the IP address + :rtype: keystoneauth1.session.Session + """ + return self._get_keystone_session(ip, { + 'API_VERSION': 3, + 'OS_USERNAME': ch_keystone.DEMO_USER, + 'OS_PASSWORD': ch_keystone.DEMO_PASSWORD, + 'OS_AUTH_URL': 'http://{}:5000/v3'.format(ip), + 'OS_USER_DOMAIN_NAME': ch_keystone.DEMO_DOMAIN, + 'OS_PROJECT_DOMAIN_NAME': ch_keystone.DEMO_DOMAIN, + 'OS_PROJECT_NAME': ch_keystone.DEMO_PROJECT, + 'OS_DOMAIN_NAME': ch_keystone.DEMO_DOMAIN, + }, scope) + + def get_keystone_session_demo_admin_user(self, ip, scope='PROJECT'): + """Return the keystone session demo_admin user. + + :param ip: the IP address to get the session against. + :type ip: str + :param scope: the scope of the token + :type scope: str + :returns: a keystone session to the IP address + :rtype: keystoneauth1.session.Session + """ + return self._get_keystone_session(ip, { + 'API_VERSION': 3, + 'OS_USERNAME': ch_keystone.DEMO_ADMIN_USER, + 'OS_PASSWORD': ch_keystone.DEMO_ADMIN_USER_PASSWORD, + 'OS_AUTH_URL': 'http://{}:5000/v3'.format(ip), + 'OS_USER_DOMAIN_NAME': ch_keystone.DEMO_DOMAIN, + 'OS_PROJECT_DOMAIN_NAME': ch_keystone.DEMO_DOMAIN, + 'OS_PROJECT_NAME': ch_keystone.DEMO_PROJECT, + 'OS_DOMAIN_NAME': ch_keystone.DEMO_DOMAIN, + }, scope) + + def get_keystone_session_admin_user(self, ip): + """Return the keystone session admin user. + + :param ip: the IP address to get the session against. + :type ip: str + :returns: a keystone session to the IP address + :rtype: keystoneauth1.session.Session + """ + return openstack_utils.get_keystone_session( + openstack_utils.get_overcloud_auth(address=ip)) + + def test_003_test_override_is_observed(self): + """Test that the override is observed by the underlying service.""" + if (openstack_utils.get_os_release() < + openstack_utils.get_os_release('xenial_queens')): + raise unittest.SkipTest( + "Test skipped because bug #1880959 won't be fixed for " + "releases older than Queens") + if self._test_name is None: + logging.info("Doing policyd override for {}" + .format(self._service_name)) + else: + logging.info(self._test_name) + # note policyd override only works with Xenial-queens and so keystone + # is already v3 + + # Allow the overriden class to setup the environment before the policyd + # test is performed. + self.setup_for_attempt_operation(self.keystone_ips[0]) + + # verify that the operation works before performing the policyd + # override. + zaza_model.block_until_wl_status_info_starts_with( + self.application_name, "PO:", negate_match=True) + zaza_model.block_until_all_units_idle() + logging.info("First verify that operation works prior to override") + try: + self.get_client_and_attempt_operation(self.keystone_ips[0]) + except Exception as e: + self.cleanup_for_attempt_operation(self.keystone_ips[0]) + raise zaza_exceptions.PolicydError( + 'Service action failed and should have passed. "{}"' + .format(str(e))) + + # now do the policyd override. + logging.info("Doing policyd override with: {}".format(self._rule)) + self._set_policy_with(self._rule) + zaza_model.block_until_wl_status_info_starts_with( + self.application_name, "PO:") + zaza_model.block_until_all_units_idle() + + # now make sure the operation fails + logging.info("Now verify that operation doesn't work with override") + try: + self.get_client_and_attempt_operation(self.keystone_ips[0]) + raise zaza_exceptions.PolicydError( + "Service action passed and should have failed.") + except PolicydOperationFailedException: + pass + except zaza_exceptions.PolicydError as e: + logging.info("{}".format(str(e))) + raise + except Exception as e: + logging.info("exception was: {}".format(e.__class__.__name__)) + import traceback + logging.info(traceback.format_exc()) + self.cleanup_for_attempt_operation(self.keystone_ips[0]) + raise zaza_exceptions.PolicydError( + 'Service action failed in an unexpected way: {}' + .format(str(e))) + + # clean out the policy and wait + self._set_config(False) + # check that the status no longer has "PO:" on it. + # we have to do it twice due to async races and that some info lines + # erase the PO: bit prior to actuall getting back to idle. The double + # check verifies that the charms have started, the idle waits until it + # is finished, and then the final check really makes sure they got + # switched off. + zaza_model.block_until_wl_status_info_starts_with( + self.application_name, "PO:", negate_match=True) + zaza_model.block_until_all_units_idle() + zaza_model.block_until_wl_status_info_starts_with( + self.application_name, "PO:", negate_match=True) + + # Finally make sure it works again! + logging.info("Finally verify that operation works after removing the " + "override.") + try: + self.get_client_and_attempt_operation(self.keystone_ips[0]) + except Exception as e: + raise zaza_exceptions.PolicydError( + 'Service action failed and should have passed after removing ' + 'policy override: "{}"' + .format(str(e))) + finally: + self.cleanup_for_attempt_operation(self.keystone_ips[0]) + + logging.info('OK') + + +class KeystoneTests(BasePolicydSpecialization): + """Test the policyd override using the keystone client.""" + + _rule = {'rule.yaml': "{'identity:list_credentials': '!'}"} + + @classmethod + def setUpClass(cls, application_name=None): + """Run class setup for running NeutronApiTest charm operation tests.""" + super(KeystoneTests, cls).setUpClass( + application_name="keystone") + + def get_client_and_attempt_operation(self, ip): + """Attempt to list services. If it fails, raise an exception. + + This operation should pass normally for the demo_user, and fail when + the rule has been overriden (see the `rule` class variable. + + :param ip: the IP address to get the session against. + :type ip: str + :raises: PolicydOperationFailedException if operation fails. + """ + keystone_client = openstack_utils.get_keystone_session_client( + self.get_keystone_session_demo_admin_user(ip)) + try: + keystone_client.credentials.list() + except keystoneauth1.exceptions.http.Forbidden: + raise PolicydOperationFailedException() + + +class NeutronApiTests(BasePolicydSpecialization): + """Test the policyd override using the neutron client.""" + + _rule = {'rule.yaml': "{'create_network': '!'}"} + + @classmethod + def setUpClass(cls, application_name=None): + """Run class setup for running NeutronApiTest charm operation tests.""" + super(NeutronApiTests, cls).setUpClass(application_name="neutron-api") + cls.application_name = "neutron-api" + + # NOTE(fnordahl): There is a race between `neutron-api` signalling unit is + # ready and the service actually being ready to serve requests. The test + # will fail intermittently unless we gracefully accept this. + # Issue: openstack-charmers/zaza-openstack-tests#138 + @tenacity.retry(wait=tenacity.wait_fixed(1), + reraise=True, stop=tenacity.stop_after_delay(8)) + def get_client_and_attempt_operation(self, ip): + """Attempt to list the networks as a policyd override. + + This operation should pass normally for the demo_user, and fail when + the rule has been overriden (see the `rule` class variable. + + :param ip: the IP address to get the session against. + :type ip: str + :raises: PolicydOperationFailedException if operation fails. + """ + neutron_client = openstack_utils.get_neutron_session_client( + self.get_keystone_session_demo_user(ip)) + try: + # If we are allowed to create networks, this will return something. + # if the policyd override is present, an exception will be raised + created_network = neutron_client.create_network( + { + 'network': { + 'name': 'zaza-policyd-test', + }, + }) + logging.debug("networks: {}".format(created_network)) + neutron_client.delete_network(created_network['network']['id']) + except Exception: + raise PolicydOperationFailedException() + + +class GlanceTests(BasePolicydSpecialization): + """Test the policyd override using the glance client.""" + + _rule = {'rule.yaml': "{'get_images': '!'}"} + + @classmethod + def setUpClass(cls, application_name=None): + """Run class setup for running GlanceTests charm operation tests.""" + super(GlanceTests, cls).setUpClass(application_name="glance") + cls.application_name = "glance" + + # NOTE(lourot): Same as NeutronApiTests. There is a race between the glance + # charm signalling its readiness and the service actually being ready to + # serve requests. The test will fail intermittently unless we gracefully + # accept this. + # Issue: openstack-charmers/zaza-openstack-tests#578 + @tenacity.retry(wait=tenacity.wait_fixed(1), + reraise=True, stop=tenacity.stop_after_delay(8)) + def get_client_and_attempt_operation(self, ip): + """Attempt to list the images as a policyd override. + + This operation should pass normally for the demo_user, and fail when + the rule has been overriden (see the `rule` class variable. + + :param ip: the IP address to get the session against. + :type ip: str + :raises: PolicydOperationFailedException if operation fails. + """ + glance_client = openstack_utils.get_glance_session_client( + self.get_keystone_session_demo_user(ip)) + try: + # NOTE(ajkavanagh) - it turns out that the list() is very important + # as it forces the generator to iterate which only then checkes if + # the api call is authorized. Just getting the generator (from + # .list()) doesn't perform the API call. + images = list(glance_client.images.list()) + logging.debug("images is: {}".format(images)) + except glanceclient.common.exceptions.HTTPForbidden: + raise PolicydOperationFailedException() + + +class CinderTests(BasePolicydSpecialization): + """Test the policyd override using the cinder client.""" + + _rule = {'rule.yaml': "{'volume:get_all': '!'}"} + + @classmethod + def setUpClass(cls, application_name=None): + """Run class setup for running CinderTests charm operation tests.""" + super(CinderTests, cls).setUpClass(application_name="cinder") + cls.application_name = "cinder" + + def get_client_and_attempt_operation(self, ip): + """Attempt to list the images as a policyd override. + + This operation should pass normally for the demo_user, and fail when + the rule has been overriden (see the `rule` class variable. + + :param ip: the IP address to get the session against. + :type ip: str + :raises: PolicydOperationFailedException if operation fails. + """ + cinder_client = openstack_utils.get_cinder_session_client( + self.get_keystone_session_admin_user(ip)) + try: + cinder_client.volumes.list() + except cinderclient.exceptions.Forbidden: + raise PolicydOperationFailedException() + + +class HeatTests(BasePolicydSpecialization): + """Test the policyd override using the heat client.""" + + _rule = {'rule.yaml': "{'stacks:index': '!'}"} + + @classmethod + def setUpClass(cls, application_name=None): + """Run class setup for running HeatTests charm operation tests.""" + super(HeatTests, cls).setUpClass(application_name="heat") + cls.application_name = "heat" + + def get_client_and_attempt_operation(self, ip): + """Attempt to list the heat stacks as a policyd override. + + This operation should pass normally, and fail when + the rule has been overriden (see the `rule` class variable). + + :param ip: the IP address to get the session against. + :type ip: str + :raises: PolicydOperationFailedException if operation fails. + """ + heat_client = openstack_utils.get_heat_session_client( + self.get_keystone_session_admin_user(ip)) + try: + # stacks.list() returns a generator (as opposed to a list), so to + # force the client to actually connect, the generator has to be + # iterated. + list(heat_client.stacks.list()) + except heatclient.exc.HTTPForbidden: + raise PolicydOperationFailedException() + + +class OctaviaTests(BasePolicydSpecialization): + """Test the policyd override using the octavia client.""" + + _rule = {'rule.yaml': "{'os_load-balancer_api:provider:get_all': '!'}"} + + @classmethod + def setUpClass(cls, application_name=None): + """Run class setup for running OctaviaTests charm operation tests.""" + super(OctaviaTests, cls).setUpClass(application_name="octavia") + cls.application_name = "octavia" + cls.keystone_client = ObjectRetrierWraps( + openstack_utils.get_keystone_session_client(cls.keystone_session)) + + if (openstack_utils.get_os_release() >= + openstack_utils.get_os_release('focal_wallaby')): + # add role to admin user for the duration of the test + octavia_tests.grant_role_current_user( + cls.keystone_client, cls.keystone_session, + octavia_tests.LBAAS_ADMIN_ROLE) + + def resource_cleanup(self): + """Restore changes made by test.""" + if (openstack_utils.get_os_release() >= + openstack_utils.get_os_release('focal_wallaby')): + # revoke role from admin user added by this test + octavia_tests.revoke_role_current_user( + self.keystone_client, self.keystone_session, + octavia_tests.LBAAS_ADMIN_ROLE) + + def get_client_and_attempt_operation(self, ip): + """Attempt to list available provider drivers. + + This operation should pass normally, and fail when + the rule has been overriden (see the `rule` class variable. + + :param ip: the IP address to get the session against. + :type ip: str + :raises: PolicydOperationFailedException if operation fails. + """ + octavia_client = openstack_utils.get_octavia_session_client( + self.get_keystone_session_admin_user(ip)) + try: + octavia_client.provider_list() + self.run_resource_cleanup = True + except (octaviaclient.OctaviaClientException, + keystoneauth1.exceptions.http.Forbidden): + raise PolicydOperationFailedException() diff --git a/zaza/openstack/charm_tests/security/__init__.py b/zaza/openstack/charm_tests/quagga/__init__.py similarity index 91% rename from zaza/openstack/charm_tests/security/__init__.py rename to zaza/openstack/charm_tests/quagga/__init__.py index ec47696..e67cd6a 100644 --- a/zaza/openstack/charm_tests/security/__init__.py +++ b/zaza/openstack/charm_tests/quagga/__init__.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Test security checklist.""" +"""Collection of code for testing quagga charm.""" diff --git a/zaza/openstack/charm_tests/quagga/tests.py b/zaza/openstack/charm_tests/quagga/tests.py new file mode 100644 index 0000000..67408f2 --- /dev/null +++ b/zaza/openstack/charm_tests/quagga/tests.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 + +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encapsulating `quagga` testing.""" + +import logging +import re +import unittest + +import zaza + + +class QuaggaTest(unittest.TestCase): + """Class for `quagga` tests.""" + + @classmethod + def setUpClass(cls): + """Run class setup for `quagga` tests.""" + super(QuaggaTest, cls).setUpClass() + + def test_bgp_peer_datapath(self): + """Get peers from BGP neighbour list and ping them.""" + status = zaza.model.get_status() + applications = (app for app in ['spine0', 'spine1', 'tor0', 'tor1', + 'tor2', 'peer0', 'peer1'] + if app in status.applications.keys()) + for application in applications: + for unit in zaza.model.get_units(application): + bgp_sum = zaza.model.run_on_unit( + unit.entity_id, + 'echo "sh bgp ipv4 unicast summary" | vtysh')['Stdout'] + r = re.compile(r'^(\d+\.\d+\.\d+\.\d+)') + ip_list = [] + for line in bgp_sum.splitlines(): + m = r.match(line) + if m: + ip_list.append(m.group(1)) + logging.info('unit {} neighbours {}' + .format(unit.entity_id, ip_list)) + + if not ip_list: + raise Exception('FAILED: Unit {} has no BGP peers.' + .format(unit.entity_id)) + for ip in ip_list: + result = zaza.model.run_on_unit( + unit.entity_id, + 'ping -c 3 {}'.format(ip)) + logging.info(result['Stdout']) + if result['Code'] == '1': + raise Exception('FAILED') diff --git a/zaza/openstack/charm_tests/rabbitmq_server/__init__.py b/zaza/openstack/charm_tests/rabbitmq_server/__init__.py new file mode 100644 index 0000000..cdac408 --- /dev/null +++ b/zaza/openstack/charm_tests/rabbitmq_server/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing rabbitmq server.""" diff --git a/zaza/openstack/charm_tests/rabbitmq_server/tests.py b/zaza/openstack/charm_tests/rabbitmq_server/tests.py new file mode 100644 index 0000000..fed72df --- /dev/null +++ b/zaza/openstack/charm_tests/rabbitmq_server/tests.py @@ -0,0 +1,485 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""RabbitMQ Testing.""" + +import json +import logging +import time +import uuid +import unittest + +import juju +import tenacity +import zaza.model +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.utilities.generic as generic_utils + +from charmhelpers.core.host import CompareHostReleases +from zaza.openstack.utilities.generic import get_series + +from . import utils as rmq_utils +from .utils import RmqNoMessageException + + +class RmqTests(test_utils.OpenStackBaseTest): + """Zaza tests on a basic rabbitmq cluster deployment.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(RmqTests, cls).setUpClass() + + def _get_uuid_epoch_stamp(self): + """Return a string based on uuid4 and epoch time. + + Useful in generating test messages which need to be unique-ish. + """ + return '[{}-{}]'.format(uuid.uuid4(), time.time()) + + @tenacity.retry( + retry=tenacity.retry_if_exception_type(RmqNoMessageException), + wait=tenacity.wait_fixed(10), + stop=tenacity.stop_after_attempt(2)) + def _retry_get_amqp_message(self, check_unit, ssl=None, port=None): + return rmq_utils.get_amqp_message_by_unit(check_unit, + ssl=ssl, + port=port) + + def _test_rmq_amqp_messages_all_units(self, units, + ssl=False, port=None): + """Reusable test to send/check amqp messages to every listed rmq unit. + + Reusable test to send amqp messages to every listed rmq + unit. Checks every listed rmq unit for messages. + :param units: list of units + :returns: None if successful. Raise on error. + + """ + # Add test user if it does not already exist + rmq_utils.add_user(units) + + # Handle ssl (includes wait-for-cluster) + if ssl: + rmq_utils.configure_ssl_on(units, port=port) + else: + rmq_utils.configure_ssl_off(units) + + # Publish and get amqp messages in all possible unit combinations. + # Qty of checks == qty_of_units * (qty_of_units - 1) + assert len(units) >= 2, 'Test is useful only with 2 units or more.' + + amqp_msg_counter = 1 + host_names = generic_utils.get_unit_hostnames(units) + + for dest_unit in units: + dest_unit_name = dest_unit.entity_id + dest_unit_host = dest_unit.public_address + dest_unit_host_name = host_names[dest_unit_name] + + for check_unit in units: + check_unit_name = check_unit.entity_id + if dest_unit_name == check_unit_name: + logging.info("Skipping check for this unit to itself.") + continue + check_unit_host = check_unit.public_address + check_unit_host_name = host_names[check_unit_name] + + amqp_msg_stamp = self._get_uuid_epoch_stamp() + amqp_msg = ('Message {}@{} {}'.format(amqp_msg_counter, + dest_unit_host, + amqp_msg_stamp)).upper() + # Publish amqp message + logging.info('Publish message to: {} ' + '({} {})'.format(dest_unit_host, + dest_unit_name, + dest_unit_host_name)) + + rmq_utils.publish_amqp_message_by_unit(dest_unit, + amqp_msg, ssl=ssl, + port=port) + + # Get amqp message + logging.info('Get message from: {} ' + '({} {})'.format(check_unit_host, + check_unit_name, + check_unit_host_name)) + + amqp_msg_rcvd = self._retry_get_amqp_message(check_unit, + ssl=ssl, + port=port) + + # Validate amqp message content + if amqp_msg == amqp_msg_rcvd: + logging.info('Message {} received ' + 'OK.'.format(amqp_msg_counter)) + else: + logging.error('Expected: {}'.format(amqp_msg)) + logging.error('Actual: {}'.format(amqp_msg_rcvd)) + msg = 'Message {} mismatch.'.format(amqp_msg_counter) + raise Exception(msg) + + amqp_msg_counter += 1 + + # Delete the test user + rmq_utils.delete_user(units) + + def test_400_rmq_cluster_running_nodes(self): + """Verify cluster status shows every cluster node as running member.""" + logging.info('Checking that all units are in cluster_status ' + 'running nodes...') + + units = zaza.model.get_units(self.application_name) + + ret = rmq_utils.validate_cluster_running_nodes(units) + self.assertIsNone(ret, msg=ret) + + logging.info('OK') + + def test_406_rmq_amqp_messages_all_units_ssl_off(self): + """Send (and check) amqp messages to every rmq unit. + + Sends amqp messages to every rmq unit, and check every rmq + unit for messages. Uses Standard amqp tcp port, no ssl. + + """ + logging.info('Checking amqp message publish/get on all units ' + '(ssl off)...') + + units = zaza.model.get_units(self.application_name) + self._test_rmq_amqp_messages_all_units(units, ssl=False) + logging.info('OK') + + def test_408_rmq_amqp_messages_all_units_ssl_on(self): + """Send (and check) amqp messages to every rmq unit (ssl enabled). + + Sends amqp messages to every rmq unit, and check every rmq + unit for messages. Uses Standard ssl tcp port. + + """ + units = zaza.model.get_units(self.application_name) + + # http://pad.lv/1625044 + if CompareHostReleases(get_series(units[0])) <= 'trusty': + logging.info('SKIP') + logging.info('Skipping SSL tests due to client' + ' compatibility issues') + return + logging.info('Checking amqp message publish/get on all units ' + '(ssl on)...') + + self._test_rmq_amqp_messages_all_units(units, + ssl=True, port=5671) + logging.info('OK') + + @tenacity.retry( + retry=tenacity.retry_if_result(lambda ret: ret is not None), + wait=tenacity.wait_fixed(30), + stop=tenacity.stop_after_attempt(20), + after=rmq_utils._log_tenacity_retry) + def _retry_port_knock_units(self, units, port, expect_success=True): + return generic_utils.port_knock_units(units, port, + expect_success=expect_success) + + def test_412_rmq_management_plugin(self): + """Enable and check management plugin.""" + logging.info('Checking tcp socket connect to management plugin ' + 'port on all rmq units...') + + units = zaza.model.get_units(self.application_name) + mgmt_port = 15672 + + # Enable management plugin + logging.info('Enabling management_plugin charm config option...') + config = {'management_plugin': 'True'} + zaza.model.set_application_config('rabbitmq-server', config) + rmq_utils.wait_for_cluster() + + # Check tcp connect to management plugin port + ret = self._retry_port_knock_units(units, mgmt_port) + + self.assertIsNone(ret, msg=ret) + logging.info('Connect to all units (OK)') + + # Disable management plugin + logging.info('Disabling management_plugin charm config option...') + config = {'management_plugin': 'False'} + zaza.model.set_application_config('rabbitmq-server', config) + rmq_utils.wait_for_cluster() + + # Negative check - tcp connect to management plugin port + logging.info('Expect tcp connect fail since charm config ' + 'option is disabled.') + ret = self._retry_port_knock_units(units, + mgmt_port, + expect_success=False) + + self.assertIsNone(ret, msg=ret) + logging.info('Confirm mgmt port closed on all units (OK)') + + @tenacity.retry( + retry=tenacity.retry_if_result(lambda ret: ret is not None), + # sleep for 2mins to allow 1min cron job to run... + wait=tenacity.wait_fixed(120), + stop=tenacity.stop_after_attempt(2)) + def _retry_check_commands_on_units(self, cmds, units): + return generic_utils.check_commands_on_units(cmds, units) + + def test_414_rmq_nrpe_monitors(self): + """Check rabbimq-server nrpe monitor basic functionality.""" + units = zaza.model.get_units(self.application_name) + host_names = generic_utils.get_unit_hostnames(units) + + # check_rabbitmq monitor + logging.info('Checking nrpe check_rabbitmq on units...') + cmds = ['egrep -oh /usr/local.* /etc/nagios/nrpe.d/' + 'check_rabbitmq.cfg'] + ret = self._retry_check_commands_on_units(cmds, units) + self.assertIsNone(ret, msg=ret) + + # check_rabbitmq_queue monitor + logging.info('Checking nrpe check_rabbitmq_queue on units...') + cmds = ['egrep -oh /usr/local.* /etc/nagios/nrpe.d/' + 'check_rabbitmq_queue.cfg'] + ret = self._retry_check_commands_on_units(cmds, units) + self.assertIsNone(ret, msg=ret) + + # check dat file existence + logging.info('Checking nrpe dat file existence on units...') + for u in units: + unit_host_name = host_names[u.entity_id] + + cmds = [ + 'stat /var/lib/rabbitmq/data/{}_general_stats.dat'.format( + unit_host_name), + 'stat /var/lib/rabbitmq/data/{}_queue_stats.dat'.format( + unit_host_name) + ] + + ret = generic_utils.check_commands_on_units(cmds, [u]) + self.assertIsNone(ret, msg=ret) + + logging.info('OK') + + def test_910_pause_and_resume(self): + """The services can be paused and resumed.""" + logging.info('Checking pause and resume actions...') + + logging.info('Waiting for the cluster to be ready') + rmq_utils.wait_for_cluster() + unit = zaza.model.get_units(self.application_name)[0] + assert unit.workload_status == "active" + + logging.info('Pausing unit {}'.format(unit)) + zaza.model.run_action(unit.entity_id, "pause") + logging.info('Waiting until unit {} reaches "maintenance" state' + ''.format(unit)) + zaza.model.block_until_unit_wl_status(unit.entity_id, "maintenance") + unit = zaza.model.get_unit_from_name(unit.entity_id) + assert unit.workload_status == "maintenance" + + logging.info('Resuming unit {}'.format(unit)) + zaza.model.run_action(unit.entity_id, "resume") + logging.info('Waiting until unit {} reaches "active" state' + ''.format(unit)) + zaza.model.block_until_unit_wl_status(unit.entity_id, "active") + unit = zaza.model.get_unit_from_name(unit.entity_id) + assert unit.workload_status == "active" + + rmq_utils.wait_for_cluster() + logging.info('OK') + + def test_911_cluster_status(self): + """Test rabbitmqctl cluster_status action can be returned.""" + logging.info('Checking cluster status action...') + + unit = zaza.model.get_units(self.application_name)[0] + action = zaza.model.run_action(unit.entity_id, "cluster-status") + self.assertIsInstance(action, juju.action.Action) + + logging.info('OK') + + def test_912_check_queues(self): + """Test rabbitmqctl check_queues action can be returned.""" + logging.info('Checking cluster status action...') + + unit = zaza.model.get_units(self.application_name)[0] + action = zaza.model.run_action(unit.entity_id, "check-queues") + self.assertIsInstance(action, juju.action.Action) + + def test_913_list_unconsumed_queues(self): + """Test rabbitmqctl list-unconsumed-queues action can be returned.""" + logging.info('Checking list-unconsumed-queues action...') + + units = zaza.model.get_units(self.application_name) + self._test_rmq_amqp_messages_all_units(units) + unit = units[0] + action = zaza.model.run_action(unit.entity_id, + 'list-unconsumed-queues') + self.assertIsInstance(action, juju.action.Action) + + queue_count = int(action.results['unconsumed-queue-count']) + assert queue_count > 0, 'Did not find any unconsumed queues.' + + queue_name = 'test' # publish_amqp_message_by_unit default queue name + for i in range(queue_count): + queue_data = json.loads( + action.results['unconsumed-queues'][str(i)]) + if queue_data['name'] == queue_name: + break + else: + assert False, 'Did not find expected queue in result.' + + # Since we just reused _test_rmq_amqp_messages_all_units, we should + # have created the queue if it didn't already exist, but all messages + # should have already been consumed. + if queue_data['messages'] != 0: + logging.error( + '{} has {} remaining messages in {} instead of 0.'.format( + unit.entity_id, queue_data['messages'], + queue_data['name'])) + if queue_data['messages'] >= 1: + logging.error('One message is: {}'.format( + self._retry_get_amqp_message(unit))) + assert False, 'Found unexpected message count.' + + logging.info('OK') + + @tenacity.retry( + retry=tenacity.retry_if_result(lambda errors: bool(errors)), + wait=tenacity.wait_fixed(10), + stop=tenacity.stop_after_attempt(2)) + def _retry_check_unit_cluster_nodes(self, u, unit_node_names): + return rmq_utils.check_unit_cluster_nodes(u, unit_node_names) + + @unittest.skip( + "Skipping as a significant rework is required, see " + "https://github.com/openstack-charmers/zaza-openstack-tests/issues/290" + ) + def test_921_remove_and_add_unit(self): + """Test if unit cleans up when removed from Rmq cluster. + + Test if a unit correctly cleans up by removing itself from the + RabbitMQ cluster on removal. + + Add the unit back to the cluster at the end of the test case to + avoid side-effects. + + """ + logging.info('Checking that units correctly clean up after ' + 'themselves on unit removal...') + config = {'min-cluster-size': '2'} + zaza.model.set_application_config('rabbitmq-server', config) + rmq_utils.wait_for_cluster() + + all_units = zaza.model.get_units(self.application_name) + removed_unit = all_units[-1] + left_units = all_units[:-1] + + logging.info('Simulating unit {} removal'.format(removed_unit)) + zaza.model.run_on_unit(removed_unit.entity_id, 'hooks/stop') + logging.info('Waiting until unit {} reaches "waiting" state' + ''.format(removed_unit)) + zaza.model.block_until_unit_wl_status(removed_unit.entity_id, + "waiting") + + def check_units(units): + unit_host_names = generic_utils.get_unit_hostnames(units) + unit_node_names = [] + for unit in unit_host_names: + unit_node_names.append('rabbit@{}'.format( + unit_host_names[unit])) + errors = [] + + for u in units: + e = self._retry_check_unit_cluster_nodes(u, + unit_node_names) + if e: + errors.append(e) + + self.assertFalse(errors, msg=errors) + + logging.info('Checking that all units except for {} are present' + 'in the cluster'.format(removed_unit)) + check_units(left_units) + + logging.info('Re-adding the removed unit {} back to the cluster' + 'by simulating the upgrade-charm event' + ''.format(removed_unit)) + # TODO(dmitriis): Fix the rabbitmq charm to add a proper way to add a + # unit back to the cluster and replace this. + zaza.model.run_on_unit(removed_unit.entity_id, 'hooks/upgrade-charm') + logging.info('Waiting until unit {} reaches "active" state' + ''.format(removed_unit)) + zaza.model.block_until_unit_wl_status(removed_unit.entity_id, + "active") + logging.info('Checking that all units are present in the cluster') + check_units(all_units) + + logging.info('OK') + + +class RabbitMQDeferredRestartTest(test_utils.BaseDeferredRestartTest): + """Deferred restart tests.""" + + @classmethod + def setUpClass(cls): + """Run setup for deferred restart tests.""" + super().setUpClass(application_name='rabbitmq-server') + + def check_status_message_is_clear(self): + """Check each units status message show no defeerred events.""" + pattern = '(Unit is ready|Unit is ready and clustered)$' + for unit in zaza.model.get_units(self.application_name): + zaza.model.block_until_unit_wl_message_match( + unit.entity_id, + pattern) + zaza.model.block_until_all_units_idle() + + def get_new_config(self): + """Return the config key and new value to trigger a hook execution. + + :returns: Config key and new value + :rtype: (str, bool) + """ + app_config = zaza.model.get_application_config(self.application_name) + new_value = str(int( + app_config['connection-backlog'].get('value', 100) + 1)) + return 'connection-backlog', new_value + + def run_tests(self): + """Run deferred restart tests.""" + # Trigger a config change which triggers a deferred hook. + self.run_charm_change_hook_test('config-changed') + + # Trigger a package change which requires a restart + self.run_package_change_test( + 'rabbitmq-server', + 'rabbitmq-server') + + def check_clear_restarts(self): + """Clear and deferred restarts and check status. + + Clear and deferred restarts and then check the workload status message + for each unit. + """ + # Use action to run any deferred restarts + for unit in zaza.model.get_units(self.application_name): + zaza.model.run_action( + unit.entity_id, + 'restart-services', + action_params={'services': 'rabbitmq-server'}) + + # Check workload status no longer shows deferred restarts. + self.check_status_message_is_clear() diff --git a/zaza/openstack/charm_tests/rabbitmq_server/utils.py b/zaza/openstack/charm_tests/rabbitmq_server/utils.py new file mode 100644 index 0000000..3994178 --- /dev/null +++ b/zaza/openstack/charm_tests/rabbitmq_server/utils.py @@ -0,0 +1,594 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""RabbitMQ Testing utility functions.""" + +import json +import logging + +import pika +import tenacity +import zaza.model + +import ssl as libssl +import zaza.openstack.utilities.generic as generic_utils + + +class RmqNoMessageException(Exception): + """Message retrieval from Rmq resulted in no message.""" + + pass + + +def _log_tenacity_retry(retry_state): + logging.info('Attempt {}: {}'.format(retry_state.attempt_number, + retry_state.outcome.result())) + + +def wait_for_cluster(model_name=None, timeout=1200): + """Wait for Rmq cluster status to show cluster readiness. + + Wait for rmq units extended status to show cluster readiness, + after an optional initial sleep period. Initial sleep is likely + necessary to be effective following a config change, as status + message may not instantly update to non-ready. + """ + states = { + 'rabbitmq-server': { + 'workload-status-messages': 'Unit is ready and clustered' + } + } + + zaza.model.wait_for_application_states(model_name=model_name, + states=states, + timeout=timeout) + + +def add_user(units, username="testuser1", password="changeme"): + """Add a user to a RabbitMQ cluster. + + Add a user via the first rmq juju unit, check connection as + the new user against all units. + :param units: list of unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. + """ + logging.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + cmd_result = zaza.model.run_on_unit(units[0].entity_id, cmd_user_list) + output = cmd_result['Stdout'].strip() + if username in output: + logging.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + cmd_result = zaza.model.run_on_unit(units[0].entity_id, cmd) + output = cmd_result['Stdout'].strip() + + # Check connection against the other units + logging.debug('Checking user connect against units...') + for u in units: + connection = connect_amqp_by_unit(u, ssl=False, + username=username, + password=password) + connection.close() + + +def delete_user(units, username="testuser1"): + """Delete a user from a RabbitMQ cluster. + + Delete a rabbitmq user via the first rmq juju unit. + :param units: list of unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + logging.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output = zaza.model.run_on_unit(units[0].entity_id, + cmd_user_list)['Stdout'].strip() + + if username not in output: + logging.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output = zaza.model.run_on_unit(units[0].entity_id, cmd_user_del) + + +def is_rabbitmq_version_ge_382(unit): + """Test is the rabbitmq version on the :param:`unit` is 3.8.2+. + + Returns True if the rabbitmq_server version installed on the :param:`unit` + is >= 3.8.2 + + :param unit: the unit to test + :type unit: :class:`juju.model.ModelEntity` + :returns: True if the server is 3.8.2 or later + :rtype: Boolean + """ + cmd = 'rabbitmqctl version' + output = zaza.model.run_on_unit(unit.entity_id, cmd)['Stdout'].strip() + logging.debug('{} rabbitmq version:{}'.format(unit.entity_id, output)) + try: + return tuple(map(int, output.split('.')[:3])) >= (3, 8, 2) + except Exception: + return False + + +def get_cluster_status(unit): + """Get RabbitMQ cluster status output. + + Execute rabbitmq cluster status command on a unit and return + the full output. + :param unit: unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output = zaza.model.run_on_unit(unit.entity_id, cmd)['Stdout'].strip() + logging.debug('{} cluster_status:\n{}'.format( + unit.entity_id, output)) + return str(output) + + +def get_cluster_running_nodes(unit): + """Get a list of RabbitMQ cluster's running nodes. + + Return a list of the running rabbitmq cluster nodes from the specified + unit. + + NOTE: this calls one of two functions depending on whether the installed + version on the unit is 3.8.2 and newer, or older. If newer then the + --formatter=json option is used to simplify parsing of the cluster data. + + :param unit: the unit to fetch running nodes list from + :type unit: :class:`juju.model.ModelEntity` + :returns: List containing node names of running nodes + :rtype: List[str] + """ + if is_rabbitmq_version_ge_382(unit): + return _get_cluster_running_nodes_38(unit) + else: + return _get_cluster_running_nodes_pre_38(unit) + + +def _get_cluster_running_nodes_pre_38(unit): + """Get a list of RabbitMQ cluster's running nodes (pre 3.8.2). + + Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: unit pointer + :type unit: :class:`juju.model.ModelEntity` + :returns: List containing node names of running nodes + :rtype: List[str] + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = get_cluster_status(unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + +def _get_cluster_running_nodes_38(unit): + """Get a list of RabbitMQ cluster's running nodes (3.8.2+). + + Return a list of the running rabbitmq cluster nodes from the specified + unit. + + :param unit: the unit to fetch running nodes list from + :type unit: :class:`juju.model.ModelEntity` + :returns: List containing node names of running nodes + :rtype: List[str] + """ + cmd = 'rabbitmqctl cluster_status --formatter=json' + output = zaza.model.run_on_unit(unit.entity_id, cmd)['Stdout'].strip() + decoded = json.loads(output) + return decoded['running_nodes'] + + +def validate_cluster_running_nodes(units): + """Check all rmq unit hostnames are represented in cluster_status. + + Check that all rmq unit hostnames are represented in the + cluster_status output of all units. + :param host_names: dict of juju unit names to host names + :param units: list of unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = generic_utils.get_unit_hostnames(units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in units: + query_unit_name = query_unit.entity_id + running_nodes = get_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in units: + val_host_name = host_names[validate_unit.entity_id] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + +def validate_ssl_enabled_units(units, port=None): + """Check that ssl is enabled on rmq juju units. + + :param units: list of all rmq units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for u in units: + if not is_ssl_enabled_on_unit(u, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(u.info['unit_name'])) + return None + + +def validate_ssl_disabled_units(units): + """Check that ssl is enabled on listed rmq juju units. + + :param units: list of all rmq units + :returns: True if successful. Raise on error. + """ + for u in units: + if is_ssl_enabled_on_unit(u): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(u.entity_id)) + return None + + +@tenacity.retry( + retry=tenacity.retry_if_result(lambda errors: bool(errors)), + wait=tenacity.wait_fixed(4), + stop=tenacity.stop_after_attempt(15), + after=_log_tenacity_retry) +def _retry_validate_ssl_enabled_units(units, port=None): + return validate_ssl_enabled_units(units, port=port) + + +def configure_ssl_on(units, model_name=None, port=None): + """Turn RabbitMQ charm SSL config option on. + + Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. + :param units: list of units + :param port: amqp port, use defaults if None + :returns: None if successful. Raise on error. + """ + logging.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = str(port) + + zaza.model.set_application_config('rabbitmq-server', + config, + model_name=model_name) + + # Wait for unit status + wait_for_cluster(model_name) + + ret = _retry_validate_ssl_enabled_units(units, port=port) + if ret: + raise Exception(ret) + + +@tenacity.retry( + retry=tenacity.retry_if_result(lambda errors: bool(errors)), + wait=tenacity.wait_fixed(4), + stop=tenacity.stop_after_attempt(15), + after=_log_tenacity_retry) +def _retry_validate_ssl_disabled_units(units): + return validate_ssl_disabled_units(units) + + +def configure_ssl_off(units, model_name=None, max_wait=60): + """Turn RabbitMQ charm SSL config option off. + + Turn ssl charm config option off, confirm that it is disabled + on every unit. + :param units: list of units + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + logging.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + zaza.model.set_application_config('rabbitmq-server', + config, + model_name=model_name) + + # Wait for unit status + wait_for_cluster(model_name) + + ret = _retry_validate_ssl_disabled_units(units) + + if ret: + raise Exception(ret) + + +def is_ssl_enabled_on_unit(unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = unit.public_address + unit_name = unit.entity_id + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(generic_utils.get_file_contents(unit, + conf_file)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + logging.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + logging.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + logging.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not conf_ssl: + logging.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + raise ValueError(msg) + + +def connect_amqp_by_unit(unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service. + + Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + :param unit: unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = unit.public_address + unit_name = unit.entity_id + + if ssl: + # TODO: when Python3.5 support is removed, investigate + # changing protocol to PROTOCOL_TLS + context = libssl.SSLContext(protocol=libssl.PROTOCOL_TLSv1_2) + ssl_options = pika.SSLOptions(context) + else: + ssl_options = None + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + logging.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl_options=ssl_options, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.is_open is True + logging.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + raise Exception(msg) + else: + logging.warn(msg) + return None + + +def publish_amqp_message_by_unit(unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. + + :param unit: unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + logging.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = connect_amqp_by_unit(unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + logging.debug('Defining channel...') + channel = connection.channel() + logging.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + logging.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + logging.debug('Closing channel...') + channel.close() + logging.debug('Closing connection...') + connection.close() + + +def get_amqp_message_by_unit(unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param unit: unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. + """ + connection = connect_amqp_by_unit(unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + body = body.decode() + + if method_frame: + logging.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + raise RmqNoMessageException(msg) + + +def check_unit_cluster_nodes(unit, unit_node_names): + """Check if unit exists in list of Rmq cluster node names. + + NOTE: this calls one of two functions depending on whether the installed + version on the unit is 3.8.2 and newer, or older. If newer then the + --formatter=json option is used to simplify parsing of the cluster data. + + :param unit: the unit to fetch running nodes list from + :type unit: :class:`juju.model.ModelEntity` + :param unit_node_names: The unit node names to check against + :type unit_node_names: List[str] + :returns: List containing node names of running nodes + :rtype: List[str] + """ + if is_rabbitmq_version_ge_382(unit): + return _check_unit_cluster_nodes_38(unit, unit_node_names) + else: + return _check_unit_cluster_nodes_pre_38(unit, unit_node_names) + + +def _check_unit_cluster_nodes_38(unit, unit_node_names): + """Check if unit exists in list of Rmq cluster node names (3.8.2+). + + :param unit: the unit to fetch running nodes list from + :type unit: :class:`juju.model.ModelEntity` + :param unit_node_names: The unit node names to check against + :type unit_node_names: List[str] + :returns: List containing node names of running nodes + :rtype: List[str] + """ + cmd = 'rabbitmqctl cluster_status --formatter=json' + output = zaza.model.run_on_unit(unit.entity_id, cmd)['Stdout'].strip() + decoded = json.loads(output) + return _post_check_unit_cluster_nodes( + unit, decoded['disk_nodes'], unit_node_names) + + +def _check_unit_cluster_nodes_pre_38(unit, unit_node_names): + """Check if unit exists in list of Rmq cluster node names (pre 3.8.2). + + :param unit: the unit to fetch running nodes list from + :type unit: :class:`juju.model.ModelEntity` + :param unit_node_names: The unit node names to check against + :type unit_node_names: List[str] + :returns: List containing node names of running nodes + :rtype: List[str] + """ + nodes = [] + str_stat = get_cluster_status(unit) + # make the interesting part of rabbitmqctl cluster_status output + # json-parseable. + if 'nodes,[{disc,' in str_stat: + pos_start = str_stat.find('nodes,[{disc,') + 13 + pos_end = str_stat.find(']}]},', pos_start) + 1 + str_nodes = str_stat[pos_start:pos_end].replace("'", '"') + nodes = json.loads(str_nodes) + return _post_check_unit_cluster_nodes(unit, nodes, unit_node_names) + + +def _post_check_unit_cluster_nodes(unit, nodes, unit_node_names): + """Finish of the check_unit_cluster_nodes function (internal).""" + unit_name = unit.entity_id + errors = [] + for node in nodes: + if node not in unit_node_names: + errors.append('Cluster registration check failed on {}: ' + '{} should not be registered with RabbitMQ ' + 'after unit removal.\n' + ''.format(unit_name, node)) + return errors + + +async def complete_cluster_series_upgrade(): + """Run the complete-cluster-series-upgrade action on the lead unit.""" + await zaza.model.async_run_action_on_leader( + 'rabbitmq-server', + 'complete-cluster-series-upgrade', + action_params={}) diff --git a/zaza/openstack/charm_tests/saml_mellon/setup.py b/zaza/openstack/charm_tests/saml_mellon/setup.py index 7e5f96c..20b8cbd 100644 --- a/zaza/openstack/charm_tests/saml_mellon/setup.py +++ b/zaza/openstack/charm_tests/saml_mellon/setup.py @@ -25,6 +25,7 @@ from zaza.openstack.utilities import ( cert as cert_utils, cli as cli_utils, openstack as openstack_utils, + generic as generic_utils, ) @@ -34,8 +35,8 @@ FEDERATED_DOMAIN = "federated_domain" FEDERATED_GROUP = "federated_users" MEMBER = "Member" IDP = "samltest" +LOCAL_IDP_REMOTE_ID = "http://{}/simplesaml/saml2/idp/metadata.php" REMOTE_ID = "https://samltest.id/saml/idp" -MAP_NAME = "{}_mapping".format(IDP) PROTOCOL_NAME = "mapped" MAP_TEMPLATE = ''' [{{ @@ -45,7 +46,7 @@ MAP_TEMPLATE = ''' "name": "{{0}}" }}, "group": {{ - "name": "federated_users", + "name": "{group_id}", "domain": {{ "id": "{domain_id}" }} @@ -55,7 +56,7 @@ MAP_TEMPLATE = ''' "name": "{{0}}_project", "roles": [ {{ - "name": "Member" + "name": "{role_name}" }} ] }} @@ -81,7 +82,10 @@ SP_SIGNING_KEY_INFO_XML_TEMPLATE = ''' ''' -def keystone_federation_setup(): +def keystone_federation_setup(federated_domain=FEDERATED_DOMAIN, + federated_group=FEDERATED_GROUP, + idp_name=IDP, + idp_remote_id=REMOTE_ID): """Configure Keystone Federation.""" cli_utils.setup_logging() keystone_session = openstack_utils.get_overcloud_keystone_session() @@ -89,19 +93,19 @@ def keystone_federation_setup(): keystone_session) try: - domain = keystone_client.domains.find(name=FEDERATED_DOMAIN) + domain = keystone_client.domains.find(name=federated_domain) except keystoneauth1.exceptions.http.NotFound: domain = keystone_client.domains.create( - FEDERATED_DOMAIN, + federated_domain, description="Federated Domain", enabled=True) try: group = keystone_client.groups.find( - name=FEDERATED_GROUP, domain=domain) + name=federated_group, domain=domain) except keystoneauth1.exceptions.http.NotFound: group = keystone_client.groups.create( - FEDERATED_GROUP, + federated_group, domain=domain, enabled=True) @@ -109,30 +113,33 @@ def keystone_federation_setup(): keystone_client.roles.grant(role, group=group, domain=domain) try: - idp = keystone_client.federation.identity_providers.find( - name=IDP, domain_id=domain.id) + idp = keystone_client.federation.identity_providers.get(idp_name) except keystoneauth1.exceptions.http.NotFound: idp = keystone_client.federation.identity_providers.create( - IDP, - remote_ids=[REMOTE_ID], + idp_name, + remote_ids=[idp_remote_id], domain_id=domain.id, enabled=True) - JSON_RULES = json.loads(MAP_TEMPLATE.format(domain_id=domain.id)) + JSON_RULES = json.loads(MAP_TEMPLATE.format( + domain_id=domain.id, group_id=group.id, role_name=MEMBER)) + map_name = "{}_mapping".format(idp_name) try: - keystone_client.federation.mappings.find(name=MAP_NAME) + keystone_client.federation.mappings.get(map_name) except keystoneauth1.exceptions.http.NotFound: keystone_client.federation.mappings.create( - MAP_NAME, rules=JSON_RULES) + map_name, rules=JSON_RULES) try: - keystone_client.federation.protocols.get(IDP, PROTOCOL_NAME) + keystone_client.federation.protocols.get(idp_name, PROTOCOL_NAME) except keystoneauth1.exceptions.http.NotFound: keystone_client.federation.protocols.create( - PROTOCOL_NAME, mapping=MAP_NAME, identity_provider=idp) + PROTOCOL_NAME, mapping=map_name, identity_provider=idp) +# This setup method is deprecated. It will be removed once we fully drop the +# `samltest.id` dependency. def attach_saml_resources(application="keystone-saml-mellon"): """Attach resource to the Keystone SAML Mellon charm.""" test_idp_metadata_xml = "samltest.xml" @@ -161,3 +168,83 @@ def attach_saml_resources(application="keystone-saml-mellon"): fp.flush() zaza.model.attach_resource( application, sp_signing_keyinfo_name, fp.name) + + +def _attach_saml_resources_local_idp(keystone_saml_mellon_app_name=None, + test_saml_idp_app_name=None): + """Attach resources to the Keystone SAML Mellon and the local IdP.""" + action_result = zaza.model.run_action_on_leader( + test_saml_idp_app_name, 'get-idp-metadata') + idp_metadata = action_result.data['results']['output'] + + generic_utils.attach_file_resource( + keystone_saml_mellon_app_name, + 'idp-metadata', + idp_metadata, + '.xml') + + (key, cert) = cert_utils.generate_cert('SP Signing Key') + + cert = cert.decode().strip("-----BEGIN CERTIFICATE-----") + cert = cert.strip("-----END CERTIFICATE-----") + + generic_utils.attach_file_resource( + keystone_saml_mellon_app_name, + 'sp-private-key', + key.decode(), + '.pem') + generic_utils.attach_file_resource( + keystone_saml_mellon_app_name, + 'sp-signing-keyinfo', + SP_SIGNING_KEY_INFO_XML_TEMPLATE.format(cert), + '.xml') + + action_result = zaza.model.run_action_on_leader( + keystone_saml_mellon_app_name, 'get-sp-metadata') + sp_metadata = action_result.data['results']['output'] + + generic_utils.attach_file_resource( + test_saml_idp_app_name, + 'sp-metadata', + sp_metadata, + '.xml') + + +def attach_saml_resources_idp1(): + """Attach the SAML resources for the local IdP #1.""" + _attach_saml_resources_local_idp( + keystone_saml_mellon_app_name="keystone-saml-mellon1", + test_saml_idp_app_name="test-saml-idp1") + + +def attach_saml_resources_idp2(): + """Attach the SAML resources for the local IdP #2.""" + _attach_saml_resources_local_idp( + keystone_saml_mellon_app_name="keystone-saml-mellon2", + test_saml_idp_app_name="test-saml-idp2") + + +def keystone_federation_setup_idp1(): + """Configure Keystone Federation for the local IdP #1.""" + test_saml_idp_unit = zaza.model.get_units("test-saml-idp1")[0] + idp_remote_id = LOCAL_IDP_REMOTE_ID.format( + test_saml_idp_unit.public_address) + + keystone_federation_setup( + federated_domain="federated_domain_idp1", + federated_group="federated_users_idp1", + idp_name="test-saml-idp1", + idp_remote_id=idp_remote_id) + + +def keystone_federation_setup_idp2(): + """Configure Keystone Federation for the local IdP #2.""" + test_saml_idp_unit = zaza.model.get_units("test-saml-idp2")[0] + idp_remote_id = LOCAL_IDP_REMOTE_ID.format( + test_saml_idp_unit.public_address) + + keystone_federation_setup( + federated_domain="federated_domain_idp2", + federated_group="federated_users_idp2", + idp_name="test-saml-idp2", + idp_remote_id=idp_remote_id) diff --git a/zaza/openstack/charm_tests/saml_mellon/tests.py b/zaza/openstack/charm_tests/saml_mellon/tests.py index 28ceb4e..962fa92 100644 --- a/zaza/openstack/charm_tests/saml_mellon/tests.py +++ b/zaza/openstack/charm_tests/saml_mellon/tests.py @@ -21,6 +21,7 @@ import requests import zaza.model from zaza.openstack.charm_tests.keystone import BaseKeystoneTest import zaza.charm_lifecycle.utils as lifecycle_utils +import zaza.openstack.utilities.openstack as openstack_utils class FailedToReachIDP(Exception): @@ -29,6 +30,8 @@ class FailedToReachIDP(Exception): pass +# This testing class is deprecated. It will be removed once we fully drop the +# `samltest.id` dependency. class CharmKeystoneSAMLMellonTest(BaseKeystoneTest): """Charm Keystone SAML Mellon tests.""" @@ -42,6 +45,8 @@ class CharmKeystoneSAMLMellonTest(BaseKeystoneTest): cls.test_config = lifecycle_utils.get_charm_config() cls.application_name = cls.test_config['charm_name'] cls.action = "get-sp-metadata" + cls.current_release = openstack_utils.get_os_release() + cls.FOCAL_USSURI = openstack_utils.get_os_release("focal_ussuri") def test_run_get_sp_metadata_action(self): """Validate the get-sp-metadata action.""" @@ -92,8 +97,13 @@ class CharmKeystoneSAMLMellonTest(BaseKeystoneTest): else: proto = "http" + # Use Keystone URL for < Focal + if self.current_release < self.FOCAL_USSURI: + region = "{}://{}:5000/v3".format(proto, keystone_ip) + else: + region = "default" + url = "{}://{}/horizon/auth/login/".format(proto, horizon_ip) - region = "{}://{}:5000/v3".format(proto, keystone_ip) horizon_expect = ('') @@ -148,3 +158,200 @@ class CharmKeystoneSAMLMellonTest(BaseKeystoneTest): # We may need to try/except to allow horizon to build its pages _do_redirect_check(url, region, idp_expect, horizon_expect) logging.info("SUCCESS") + + +class BaseCharmKeystoneSAMLMellonTest(BaseKeystoneTest): + """Charm Keystone SAML Mellon tests.""" + + @classmethod + def setUpClass(cls, + application_name="keystone-saml-mellon", + test_saml_idp_app_name="test-saml-idp", + horizon_idp_option_name="myidp_mapped", + horizon_idp_display_name="myidp via mapped"): + """Run class setup for running Keystone SAML Mellon charm tests.""" + super(BaseCharmKeystoneSAMLMellonTest, cls).setUpClass() + # Note: The BaseKeystoneTest class sets the application_name to + # "keystone" which breaks keystone-saml-mellon actions. Explicitly set + # application name here. + cls.application_name = application_name + cls.test_saml_idp_app_name = test_saml_idp_app_name + cls.horizon_idp_option_name = horizon_idp_option_name + cls.horizon_idp_display_name = horizon_idp_display_name + cls.action = "get-sp-metadata" + cls.current_release = openstack_utils.get_os_release() + cls.FOCAL_USSURI = openstack_utils.get_os_release("focal_ussuri") + + @staticmethod + def check_horizon_redirect(horizon_url, horizon_expect, + horizon_idp_option_name, horizon_region, + idp_url, idp_expect): + """Validate the Horizon -> Keystone -> IDP redirects. + + This validation is done through `requests.session()`, and the proper + get / post http calls. + + :param horizon_url: The login page for the Horizon OpenStack dashboard. + :type horizon_url: string + :param horizon_expect: Information that needs to be displayed by + Horizon login page, when there is a proper + SAML IdP configuration. + :type horizon_expect: string + :param horizon_idp_option_name: The name of the IdP that is chosen + in the Horizon dropdown from the login + screen. This will go in the post body + as 'auth_type'. + :type horizon_idp_option_name: string + :param horizon_region: Information needed to complete the http post + data for the Horizon login. + :type horizon_region: string + :param idp_url: The url for the IdP where the user needs to be + redirected. + :type idp_url: string + :param idp_expect: Information that needs to be displayed by the IdP + after the user is redirected there. + :type idp_expect: string + :returns: None + """ + # start session, get csrftoken + client = requests.session() + # Verify=False see note below + login_page = client.get(horizon_url, verify=False) + + # Validate SAML method is available + assert horizon_expect in login_page.text + + # Get cookie + if "csrftoken" in client.cookies: + csrftoken = client.cookies["csrftoken"] + else: + raise Exception("Missing csrftoken") + + # Build and send post request + form_data = { + "auth_type": horizon_idp_option_name, + "csrfmiddlewaretoken": csrftoken, + "next": "/horizon/project/api_access", + "region": horizon_region, + } + + # Verify=False due to CA certificate bundles. + # If we don't set it validation fails for keystone/horizon + # We would have to install the keystone CA onto the system + # to validate end to end. + response = client.post( + horizon_url, + data=form_data, + headers={"Referer": horizon_url}, + allow_redirects=True, + verify=False) + + if idp_expect not in response.text: + msg = "FAILURE code={} text={}".format(response, response.text) + # Raise a custom exception. + raise FailedToReachIDP(msg) + + # Validate that we were redirected to the proper IdP + assert response.url.startswith(idp_url) + assert idp_url in response.text + + def test_run_get_sp_metadata_action(self): + """Validate the get-sp-metadata action.""" + unit = zaza.model.get_units(self.application_name)[0] + ip = self.vip if self.vip else unit.public_address + + action = zaza.model.run_action(unit.entity_id, self.action) + self.assertNotIn( + "failed", + action.data["status"], + msg="The action failed: {}".format(action.data["message"])) + + output = action.data["results"]["output"] + root = etree.fromstring(output) + for item in root.items(): + if "entityID" in item[0]: + self.assertIn(ip, item[1]) + + for appt in root.getchildren(): + for elem in appt.getchildren(): + for item in elem.items(): + if "Location" in item[0]: + self.assertIn(ip, item[1]) + + logging.info("Successul get-sp-metadata action") + + def test_saml_mellon_redirects(self): + """Validate the horizon -> keystone -> IDP redirects.""" + unit = zaza.model.get_units(self.application_name)[0] + keystone_ip = self.vip if self.vip else unit.public_address + + horizon = "openstack-dashboard" + horizon_config = zaza.model.get_application_config(horizon) + horizon_vip = horizon_config.get("vip").get("value") + unit = zaza.model.get_units("openstack-dashboard")[0] + + horizon_ip = horizon_vip if horizon_vip else unit.public_address + proto = "https" if self.tls_rid else "http" + + # Use Keystone URL for < Focal + if self.current_release < self.FOCAL_USSURI: + region = "{}://{}:5000/v3".format(proto, keystone_ip) + else: + region = "default" + + idp_address = zaza.model.get_units( + self.test_saml_idp_app_name)[0].public_address + + horizon_url = "{}://{}/horizon/auth/login/".format(proto, horizon_ip) + horizon_expect = ''.format( + self.horizon_idp_option_name, self.horizon_idp_display_name) + idp_url = ("http://{0}/simplesaml/" + "module.php/core/loginuserpass.php").format(idp_address) + # This is the message the local test-saml-idp displays after you are + # redirected. It shows we have been directed to: + # horizon -> keystone -> test-saml-idp + idp_expect = ( + "A service has requested you to authenticate yourself. Please " + "enter your username and password in the form below.") + + # Execute the check + BaseCharmKeystoneSAMLMellonTest.check_horizon_redirect( + horizon_url=horizon_url, + horizon_expect=horizon_expect, + horizon_idp_option_name=self.horizon_idp_option_name, + horizon_region=region, + idp_url=idp_url, + idp_expect=idp_expect) + logging.info("SUCCESS") + + +class CharmKeystoneSAMLMellonIDP1Test(BaseCharmKeystoneSAMLMellonTest): + """Charm Keystone SAML Mellon tests class for the local IDP #1.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running Keystone SAML Mellon charm tests. + + It does the necessary setup for the local IDP #1. + """ + super(CharmKeystoneSAMLMellonIDP1Test, cls).setUpClass( + application_name="keystone-saml-mellon1", + test_saml_idp_app_name="test-saml-idp1", + horizon_idp_option_name="test-saml-idp1_mapped", + horizon_idp_display_name="Test SAML IDP #1") + + +class CharmKeystoneSAMLMellonIDP2Test(BaseCharmKeystoneSAMLMellonTest): + """Charm Keystone SAML Mellon tests class for the local IDP #2.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running Keystone SAML Mellon charm tests. + + It does the necessary setup for the local IDP #2. + """ + super(CharmKeystoneSAMLMellonIDP2Test, cls).setUpClass( + application_name="keystone-saml-mellon2", + test_saml_idp_app_name="test-saml-idp2", + horizon_idp_option_name="test-saml-idp2_mapped", + horizon_idp_display_name="Test SAML IDP #2") diff --git a/zaza/openstack/charm_tests/security/tests.py b/zaza/openstack/charm_tests/security/tests.py deleted file mode 100644 index 741b53b..0000000 --- a/zaza/openstack/charm_tests/security/tests.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2018 Canonical Ltd. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Encapsulate general security testing.""" - -import unittest - -import zaza.model as model -import zaza.charm_lifecycle.utils as utils -from zaza.openstack.utilities.file_assertions import ( - assert_path_glob, - assert_single_file, -) - - -def _make_test_function(application, file_details, paths=None): - """Generate a test function given the specified inputs. - - :param application: Application name to assert file ownership on - :type application: str - :param file_details: Dictionary of file details to test - :type file_details: dict - :param paths: List of paths to test in this application - :type paths: Optional[list(str)] - :returns: Test function - :rtype: unittest.TestCase - """ - def test(self): - for unit in model.get_units(application): - unit = unit.entity_id - if '*' in file_details['path']: - assert_path_glob(self, unit, file_details, paths) - else: - assert_single_file(self, unit, file_details) - return test - - -def _add_tests(): - """Add tests to the unittest.TestCase.""" - def class_decorator(cls): - """Add tests based on input yaml to `cls`.""" - files = utils.get_charm_config('./file-assertions.yaml') - deployed_applications = model.sync_deployed() - for name, attributes in files.items(): - # Lets make sure to only add tests for deployed applications - if name in deployed_applications: - paths = [ - file['path'] for - file in attributes['files'] - if "*" not in file["path"] - ] - for file in attributes['files']: - test_func = _make_test_function(name, file, paths=paths) - setattr( - cls, - 'test_{}_{}'.format(name, file['path']), - test_func) - return cls - return class_decorator - - -class FileOwnershipTest(unittest.TestCase): - """Encapsulate File ownership tests.""" - - pass - - -FileOwnershipTest = _add_tests()(FileOwnershipTest) diff --git a/zaza/openstack/charm_tests/series_upgrade/parallel_tests.py b/zaza/openstack/charm_tests/series_upgrade/parallel_tests.py new file mode 100644 index 0000000..e524fa2 --- /dev/null +++ b/zaza/openstack/charm_tests/series_upgrade/parallel_tests.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python3 + +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Define class for Series Upgrade.""" + +import asyncio +import logging +import os +import sys +import unittest +import juju + +from zaza import model +from zaza.openstack.utilities import ( + cli as cli_utils, + upgrade_utils as upgrade_utils, +) +from zaza.openstack.charm_tests.nova.tests import LTSGuestCreateTest +from zaza.openstack.utilities import ( + parallel_series_upgrade, +) + + +def _filter_easyrsa(app, app_config, model_name=None): + charm_name = upgrade_utils.extract_charm_name_from_url(app_config['charm']) + if "easyrsa" in charm_name: + logging.warn("Skipping series upgrade of easyrsa Bug #1850121") + return True + return False + + +def _filter_etcd(app, app_config, model_name=None): + charm_name = upgrade_utils.extract_charm_name_from_url(app_config['charm']) + if "etcd" in charm_name: + logging.warn("Skipping series upgrade of easyrsa Bug #1850124") + return True + return False + + +class ParallelSeriesUpgradeTest(unittest.TestCase): + """Class to encapsulate Series Upgrade Tests.""" + + @classmethod + def setUpClass(cls): + """Run setup for Series Upgrades.""" + # NOTE(ajkavanagh): Set the jujulib Connection frame size to 4GB to + # cope with all the outputs from series upgrade; long term, don't send + # that output back, which will require that the upgrade function in the + # charm doesn't capture the output of the upgrade in the action, but + # instead puts it somewhere that can by "juju scp"ed. + juju.client.connection.Connection.MAX_FRAME_SIZE = 2**32 + cli_utils.setup_logging() + cls.from_series = None + cls.to_series = None + cls.workaround_script = None + cls.files = [] + + def test_200_run_series_upgrade(self): + """Run series upgrade.""" + # Set Feature Flag + os.environ["JUJU_DEV_FEATURE_FLAGS"] = "upgrade-series" + upgrade_groups = upgrade_utils.get_series_upgrade_groups( + extra_filters=[_filter_etcd, _filter_easyrsa]) + from_series = self.from_series + to_series = self.to_series + completed_machines = [] + workaround_script = None + files = [] + applications = model.get_status().applications + for group_name, apps in upgrade_groups: + logging.info("About to upgrade {} from {} to {}".format( + group_name, from_series, to_series)) + upgrade_functions = [] + if group_name in ["Database Services", + "Stateful Services", + "Data Plane", + "sweep_up"]: + logging.info("Going to upgrade {} unit by unit".format(apps)) + upgrade_function = \ + parallel_series_upgrade.serial_series_upgrade + else: + logging.info("Going to upgrade {} all at once".format(apps)) + upgrade_function = \ + parallel_series_upgrade.parallel_series_upgrade + + # allow up to 4 parallel upgrades at a time. This is to limit the + # amount of data/calls that asyncio is handling as it's gets + # unstable if all the applications are done at the same time. + sem = asyncio.Semaphore(4) + for charm_name in apps: + charm = applications[charm_name]['charm'] + name = upgrade_utils.extract_charm_name_from_url(charm) + upgrade_config = parallel_series_upgrade.app_config(name) + upgrade_functions.append( + wrap_coroutine_with_sem( + sem, + upgrade_function( + charm_name, + **upgrade_config, + from_series=from_series, + to_series=to_series, + completed_machines=completed_machines, + workaround_script=workaround_script, + files=files))) + asyncio.get_event_loop().run_until_complete( + asyncio.gather(*upgrade_functions)) + model.block_until_all_units_idle() + logging.info("Finished {}".format(group_name)) + logging.info("Done!") + + +async def wrap_coroutine_with_sem(sem, coroutine): + """Wrap a coroutine with a semaphore to limit concurrency. + + :param sem: The semaphore to limit concurrency + :type sem: asyncio.Semaphore + :param coroutine: the corouting to limit concurrency + :type coroutine: types.CoroutineType + """ + async with sem: + await coroutine + + +class OpenStackParallelSeriesUpgrade(ParallelSeriesUpgradeTest): + """OpenStack Series Upgrade. + + Full OpenStack series upgrade with VM launch before and after the series + upgrade. + + This test requires a full OpenStack including at least: keystone, glance, + nova-cloud-controller, nova-compute, neutron-gateway, neutron-api and + neutron-openvswitch. + """ + + @classmethod + def setUpClass(cls): + """Run setup for Series Upgrades.""" + super(OpenStackParallelSeriesUpgrade, cls).setUpClass() + cls.lts = LTSGuestCreateTest() + cls.lts.setUpClass() + + def test_100_validate_pre_series_upgrade_cloud(self): + """Validate pre series upgrade.""" + logging.info("Validate pre-series-upgrade: Spin up LTS instance") + self.lts.test_launch_small_instance() + + def test_500_validate_series_upgraded_cloud(self): + """Validate post series upgrade.""" + logging.info("Validate post-series-upgrade: Spin up LTS instance") + self.lts.test_launch_small_instance() + + +class TrustyXenialSeriesUpgrade(OpenStackParallelSeriesUpgrade): + """OpenStack Trusty to Xenial Series Upgrade.""" + + @classmethod + def setUpClass(cls): + """Run setup for Trusty to Xenial Series Upgrades.""" + super(TrustyXenialSeriesUpgrade, cls).setUpClass() + cls.from_series = "trusty" + cls.to_series = "xenial" + + +class XenialBionicSeriesUpgrade(OpenStackParallelSeriesUpgrade): + """OpenStack Xenial to Bionic Series Upgrade.""" + + @classmethod + def setUpClass(cls): + """Run setup for Xenial to Bionic Series Upgrades.""" + super(XenialBionicSeriesUpgrade, cls).setUpClass() + cls.from_series = "xenial" + cls.to_series = "bionic" + + +class BionicFocalSeriesUpgrade(OpenStackParallelSeriesUpgrade): + """OpenStack Bionic to FocalSeries Upgrade.""" + + @classmethod + def setUpClass(cls): + """Run setup for Xenial to Bionic Series Upgrades.""" + super(BionicFocalSeriesUpgrade, cls).setUpClass() + cls.from_series = "bionic" + cls.to_series = "focal" + + +class UbuntuLiteParallelSeriesUpgrade(unittest.TestCase): + """ubuntu Lite Parallel Series Upgrade.""" + + @classmethod + def setUpClass(cls): + """Run setup for Series Upgrades.""" + cli_utils.setup_logging() + cls.from_series = None + cls.to_series = None + + def test_200_run_series_upgrade(self): + """Run series upgrade.""" + # Set Feature Flag + os.environ["JUJU_DEV_FEATURE_FLAGS"] = "upgrade-series" + parallel_series_upgrade.upgrade_ubuntu_lite( + from_series=self.from_series, + to_series=self.to_series + ) + + +class TrustyXenialSeriesUpgradeUbuntu(UbuntuLiteParallelSeriesUpgrade): + """OpenStack Trusty to Xenial Series Upgrade.""" + + @classmethod + def setUpClass(cls): + """Run setup for Trusty to Xenial Series Upgrades.""" + super(TrustyXenialSeriesUpgradeUbuntu, cls).setUpClass() + cls.from_series = "trusty" + cls.to_series = "xenial" + + +class XenialBionicSeriesUpgradeUbuntu(UbuntuLiteParallelSeriesUpgrade): + """OpenStack Xenial to Bionic Series Upgrade.""" + + @classmethod + def setUpClass(cls): + """Run setup for Xenial to Bionic Series Upgrades.""" + super(XenialBionicSeriesUpgradeUbuntu, cls).setUpClass() + cls.from_series = "xenial" + cls.to_series = "bionic" + + +class BionicFocalSeriesUpgradeUbuntu(UbuntuLiteParallelSeriesUpgrade): + """OpenStack Bionic to FocalSeries Upgrade.""" + + @classmethod + def setUpClass(cls): + """Run setup for Xenial to Bionic Series Upgrades.""" + super(BionicFocalSeriesUpgradeUbuntu, cls).setUpClass() + cls.from_series = "bionic" + cls.to_series = "focal" + + +if __name__ == "__main__": + from_series = os.environ.get("FROM_SERIES") + if from_series == "trusty": + to_series = "xenial" + series_upgrade_test = TrustyXenialSeriesUpgrade() + elif from_series == "xenial": + to_series = "bionic" + series_upgrade_test = XenialBionicSeriesUpgrade() + elif from_series == "bionic": + to_series = "focal" + series_upgrade_test = BionicFocalSeriesUpgrade() + + else: + raise Exception("FROM_SERIES is not set to a vailid LTS series") + series_upgrade_test.setUpClass() + sys.exit(series_upgrade_test.test_200_run_series_upgrade()) diff --git a/zaza/openstack/charm_tests/series_upgrade/tests.py b/zaza/openstack/charm_tests/series_upgrade/tests.py index 5c088e9..122af11 100644 --- a/zaza/openstack/charm_tests/series_upgrade/tests.py +++ b/zaza/openstack/charm_tests/series_upgrade/tests.py @@ -16,6 +16,7 @@ """Define class for Series Upgrade.""" +import asyncio import logging import os import unittest @@ -23,13 +24,14 @@ import unittest from zaza import model from zaza.openstack.utilities import ( cli as cli_utils, - generic as generic_utils, + series_upgrade as series_upgrade_utils, + upgrade_utils as upgrade_utils, ) from zaza.openstack.charm_tests.nova.tests import LTSGuestCreateTest class SeriesUpgradeTest(unittest.TestCase): - """Class to encapsulate Sereis Upgrade Tests.""" + """Class to encapsulate Series Upgrade Tests.""" @classmethod def setUpClass(cls): @@ -47,56 +49,51 @@ class SeriesUpgradeTest(unittest.TestCase): applications = model.get_status().applications completed_machines = [] - for application in applications: - # Defaults - origin = "openstack-origin" - pause_non_leader_subordinate = True - pause_non_leader_primary = True + for application, app_details in applications.items(): # Skip subordinates - if applications[application]["subordinate-to"]: + if app_details["subordinate-to"]: continue - if "percona-cluster" in applications[application]["charm"]: - origin = "source" - pause_non_leader_primary = True - pause_non_leader_subordinate = True - if "rabbitmq-server" in applications[application]["charm"]: - origin = "source" - pause_non_leader_primary = True - pause_non_leader_subordinate = False - if "nova-compute" in applications[application]["charm"]: - pause_non_leader_primary = False - pause_non_leader_subordinate = False - if "ceph" in applications[application]["charm"]: - origin = "source" - pause_non_leader_primary = False - pause_non_leader_subordinate = False - if "memcached" in applications[application]["charm"]: - origin = None - pause_non_leader_primary = False - pause_non_leader_subordinate = False - if ("mongodb" in applications[application]["charm"] or - "vault" in applications[application]["charm"]): - # Mongodb and vault need to run series upgrade - # on its secondaries first. - generic_utils.series_upgrade_non_leaders_first( - application, - from_series=self.from_series, - to_series=self.to_series, - completed_machines=completed_machines) + if "easyrsa" in app_details["charm"]: + logging.warn( + "Skipping series upgrade of easyrsa Bug #1850121") continue - - # The rest are likley APIs use defaults - - generic_utils.series_upgrade_application( + if "etcd" in app_details["charm"]: + logging.warn( + "Skipping series upgrade of etcd Bug #1850124") + continue + charm_name = upgrade_utils.extract_charm_name_from_url( + app_details['charm']) + upgrade_config = series_upgrade_utils.app_config( + charm_name, + is_async=False) + upgrade_function = upgrade_config.pop('upgrade_function') + logging.warn("About to upgrade {}".format(application)) + upgrade_function( application, - pause_non_leader_primary=pause_non_leader_primary, - pause_non_leader_subordinate=pause_non_leader_subordinate, + **upgrade_config, from_series=self.from_series, to_series=self.to_series, - origin=origin, completed_machines=completed_machines, workaround_script=self.workaround_script, - files=self.files) + files=self.files, + ) + if "rabbitmq-server" in app_details["charm"]: + logging.info( + "Running complete-cluster-series-upgrade action on leader") + model.run_action_on_leader( + application, + 'complete-cluster-series-upgrade', + action_params={}) + model.block_until_all_units_idle() + + if "percona-cluster" in app_details["charm"]: + logging.info( + "Running complete-cluster-series-upgrade action on leader") + model.run_action_on_leader( + application, + 'complete-cluster-series-upgrade', + action_params={}) + model.block_until_all_units_idle() class OpenStackSeriesUpgrade(SeriesUpgradeTest): @@ -178,5 +175,96 @@ class XenialBionicSeriesUpgrade(SeriesUpgradeTest): cls.to_series = "bionic" +class ParallelSeriesUpgradeTest(unittest.TestCase): + """Class to encapsulate Series Upgrade Tests.""" + + @classmethod + def setUpClass(cls): + """Run setup for Series Upgrades.""" + cli_utils.setup_logging() + cls.from_series = None + cls.to_series = None + cls.workaround_script = None + cls.files = [] + + def test_200_run_series_upgrade(self): + """Run series upgrade.""" + # Set Feature Flag + os.environ["JUJU_DEV_FEATURE_FLAGS"] = "upgrade-series" + upgrade_groups = upgrade_utils.get_series_upgrade_groups( + extra_filters=[upgrade_utils._filter_etcd, + upgrade_utils._filter_easyrsa]) + applications = model.get_status().applications + completed_machines = [] + for group_name, group in upgrade_groups: + logging.warn("About to upgrade {} ({})".format(group_name, group)) + upgrade_group = [] + for application, app_details in applications.items(): + if application not in group: + continue + charm_name = upgrade_utils.extract_charm_name_from_url( + app_details['charm']) + upgrade_config = series_upgrade_utils.app_config(charm_name) + upgrade_function = upgrade_config.pop('upgrade_function') + logging.warn("About to upgrade {}".format(application)) + upgrade_group.append( + upgrade_function( + application, + **upgrade_config, + from_series=self.from_series, + to_series=self.to_series, + completed_machines=completed_machines, + workaround_script=self.workaround_script, + files=self.files, + )) + asyncio.get_event_loop().run_until_complete( + asyncio.gather(*upgrade_group)) + if "rabbitmq-server" in group: + logging.info( + "Running complete-cluster-series-upgrade action on leader") + model.run_action_on_leader( + 'rabbitmq-server', + 'complete-cluster-series-upgrade', + action_params={}) + model.block_until_all_units_idle() + + if "percona-cluster" in group: + logging.info( + "Running complete-cluster-series-upgrade action on leader") + model.run_action_on_leader( + 'mysql', + 'complete-cluster-series-upgrade', + action_params={}) + model.block_until_all_units_idle() + + +class ParallelTrustyXenialSeriesUpgrade(ParallelSeriesUpgradeTest): + """Trusty to Xenial Series Upgrade. + + Makes no assumptions about what is in the deployment. + """ + + @classmethod + def setUpClass(cls): + """Run setup for Trusty to Xenial Series Upgrades.""" + super(ParallelTrustyXenialSeriesUpgrade, cls).setUpClass() + cls.from_series = "trusty" + cls.to_series = "xenial" + + +class ParallelXenialBionicSeriesUpgrade(ParallelSeriesUpgradeTest): + """Xenial to Bionic Series Upgrade. + + Makes no assumptions about what is in the deployment. + """ + + @classmethod + def setUpClass(cls): + """Run setup for Xenial to Bionic Series Upgrades.""" + super(ParallelXenialBionicSeriesUpgrade, cls).setUpClass() + cls.from_series = "xenial" + cls.to_series = "bionic" + + if __name__ == "__main__": unittest.main() diff --git a/zaza/openstack/charm_tests/swift/setup.py b/zaza/openstack/charm_tests/swift/setup.py new file mode 100644 index 0000000..eb98806 --- /dev/null +++ b/zaza/openstack/charm_tests/swift/setup.py @@ -0,0 +1,38 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Code for configuring swift.""" + +import logging +import tenacity + +import zaza.openstack.utilities.openstack as openstack + + +@tenacity.retry(wait=tenacity.wait_exponential(multiplier=10, max=300), + reraise=True, stop=tenacity.stop_after_attempt(10), + retry=tenacity.retry_if_exception_type(AssertionError)) +def wait_for_region2(): + """Ensure two regions are present.""" + keystone_session = openstack.get_overcloud_keystone_session() + keystone_client = ( + openstack.get_keystone_session_client( + keystone_session, + client_api_version='3')) + swift_svc_id = keystone_client.services.find(name='swift').id + regions = set([ep.region + for ep in keystone_client.endpoints.list(swift_svc_id)]) + logging.info('Checking there are 2 regions. Current count is {}'.format( + len(regions))) + assert len(set(regions)) == 2, "Incorrect number of regions" diff --git a/zaza/openstack/charm_tests/swift/tests.py b/zaza/openstack/charm_tests/swift/tests.py index dd2e4e1..62fe02c 100644 --- a/zaza/openstack/charm_tests/swift/tests.py +++ b/zaza/openstack/charm_tests/swift/tests.py @@ -17,11 +17,17 @@ """Encapsulate swift testing.""" import logging +import pprint +import tenacity +import zaza.model import zaza.openstack.charm_tests.test_utils as test_utils import zaza.openstack.charm_tests.glance.setup as glance_setup import zaza.openstack.configure.guest import zaza.openstack.utilities.openstack as openstack_utils +import zaza.openstack.utilities.swift as swift_utils + +import boto3 class SwiftImageCreateTest(test_utils.OpenStackBaseTest): @@ -62,6 +68,10 @@ class SwiftImageCreateTest(test_utils.OpenStackBaseTest): self.assertEqual(image['size'], total_bytes) openstack_utils.delete_image(self.glance_client, image['id']) + +class SwiftProxyTests(test_utils.OpenStackBaseTest): + """Tests specific to swift proxy.""" + def test_901_pause_resume(self): """Run pause and resume tests. @@ -80,3 +90,242 @@ class SwiftImageCreateTest(test_utils.OpenStackBaseTest): 'diskusage', action_params={}) self.assertEqual(action.status, "completed") + + +class SwiftStorageTests(test_utils.OpenStackBaseTest): + """Tests specific to swift storage.""" + + def test_901_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + """ + services = ['swift-account-server', + 'swift-account-auditor', + 'swift-account-reaper', + 'swift-container-server', + 'swift-container-auditor', + 'swift-container-updater', + 'swift-object-server', + 'swift-object-auditor', + 'swift-object-updater', + 'swift-container-sync'] + + current_os_release = openstack_utils.get_os_release() + focal_victoria = openstack_utils.get_os_release('focal_victoria') + if current_os_release < focal_victoria: + services += ['swift-account-replicator', + 'swift-container-replicator', + 'swift-object-replicator'] + else: + services += ['swift-account-server', + 'swift-container-server', + 'swift-object-server'] + + with self.pause_resume(services): + logging.info("Testing pause resume") + + +class SwiftGlobalReplicationTests(test_utils.OpenStackBaseTest): + """Test swift global replication.""" + + RESOURCE_PREFIX = 'zaza-swift-gr-tests' + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + cls.region1_model_alias = 'swift_gr_region1' + cls.region1_proxy_app = 'swift-proxy-region1' + cls.region2_model_alias = 'swift_gr_region2' + cls.region2_proxy_app = 'swift-proxy-region2' + super(SwiftGlobalReplicationTests, cls).setUpClass( + application_name=cls.region1_proxy_app, + model_alias=cls.region1_model_alias) + cls.region1_model_name = cls.model_aliases[cls.region1_model_alias] + cls.region2_model_name = cls.model_aliases[cls.region2_model_alias] + cls.storage_topology = swift_utils.get_swift_storage_topology( + model_name=cls.region1_model_name) + cls.storage_topology.update( + swift_utils.get_swift_storage_topology( + model_name=cls.region2_model_name)) + cls.swift_session = openstack_utils.get_keystone_session_from_relation( + cls.region1_proxy_app, + model_name=cls.region1_model_name) + cls.swift_region1 = openstack_utils.get_swift_session_client( + cls.swift_session, + region_name='RegionOne') + cls.swift_region2 = openstack_utils.get_swift_session_client( + cls.swift_session, + region_name='RegionTwo') + + @classmethod + @tenacity.retry( + wait=tenacity.wait_exponential(multiplier=1, min=16, max=600), + reraise=True, + stop=tenacity.stop_after_attempt(10)) + def tearDown(cls): + """Remove test resources. + + The retry decorator is needed as it is luck of the draw as to whether + a delete of a newly created container will result in a 404. Retrying + will eventually result in the delete being accepted. + """ + logging.info('Running teardown') + resp_headers, containers = cls.swift_region1.get_account() + logging.info('Found containers {}'.format(containers)) + for container in containers: + if not container['name'].startswith(cls.RESOURCE_PREFIX): + continue + for obj in cls.swift_region1.get_container(container['name'])[1]: + logging.info('Deleting object {} from {}'.format( + obj['name'], + container['name'])) + cls.swift_region1.delete_object( + container['name'], + obj['name']) + logging.info('Deleting container {}'.format(container['name'])) + cls.swift_region1.delete_container(container['name']) + + def test_901_two_regions_any_zones_two_replicas(self): + """Create an object with two replicas across two regions. + + We set write affinity to write the first copy in the local + region of the proxy used to perform the write, the other + replica will land in the remote region. + """ + swift_utils.apply_proxy_config( + self.region1_proxy_app, + { + 'write-affinity': 'r1', + 'write-affinity-node-count': '1', + 'replicas': '2'}, + self.region1_model_name) + swift_utils.apply_proxy_config( + self.region2_proxy_app, + { + 'write-affinity': 'r2', + 'write-affinity-node-count': '1', + 'replicas': '2'}, + self.region2_model_name) + logging.info('Proxy configs updated in both regions') + container_name, obj_name, obj_replicas = swift_utils.create_object( + self.swift_region1, + self.region1_proxy_app, + self.storage_topology, + self.RESOURCE_PREFIX, + model_name=self.region1_model_name) + # Check object is accessible from other regions proxy. + self.swift_region2.head_object(container_name, obj_name) + # Check there is at least one replica in each region. + self.assertEqual( + sorted(obj_replicas.distinct_regions), + [1, 2]) + # Check there are two relicas + self.assertEqual( + len(obj_replicas.all_zones), + 2) + + def test_902_two_regions_any_zones_three_replicas(self): + """Create an object with three replicas across two regions. + + We set write affinity to write the first copy in the local + region of the proxy used to perform the write, at least one + of the other two replicas will end up in the opposite region + based on primary partitions in the ring. + """ + swift_utils.apply_proxy_config( + self.region1_proxy_app, + { + 'write-affinity': 'r1', + 'write-affinity-node-count': '1', + 'replicas': '3'}, + self.region1_model_name) + swift_utils.apply_proxy_config( + self.region2_proxy_app, + { + 'write-affinity': 'r2', + 'write-affinity-node-count': '1', + 'replicas': '3'}, + self.region2_model_name) + logging.info('Proxy configs updated in both regions') + container_name, obj_name, obj_replicas = swift_utils.create_object( + self.swift_region1, + self.region1_proxy_app, + self.storage_topology, + self.RESOURCE_PREFIX, + model_name=self.region1_model_name) + # Check object is accessible from other regions proxy. + self.swift_region2.head_object(container_name, obj_name) + # Check there is at least one replica in each region. + self.assertEqual( + sorted(obj_replicas.distinct_regions), + [1, 2]) + # Check there are three relicas + self.assertEqual( + len(obj_replicas.all_zones), + 3) + + +class S3APITest(test_utils.OpenStackBaseTest): + """Test object storage S3 API.""" + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super(S3APITest, cls).setUpClass() + + session = openstack_utils.get_overcloud_keystone_session() + ks_client = openstack_utils.get_keystone_session_client(session) + + # Get token data so we can glean our user_id and project_id + token_data = ks_client.tokens.get_token_data(session.get_token()) + project_id = token_data['token']['project']['id'] + user_id = token_data['token']['user']['id'] + + # Store URL to service providing S3 compatible API + for entry in token_data['token']['catalog']: + if entry['type'] == 's3': + for endpoint in entry['endpoints']: + if endpoint['interface'] == 'public': + cls.s3_region = endpoint['region'] + cls.s3_endpoint = endpoint['url'] + + # Create AWS compatible application credentials in Keystone + cls.ec2_creds = ks_client.ec2.create(user_id, project_id) + + def test_901_s3_list_buckets(self): + """Use S3 API to list buckets.""" + # We use a mix of the high- and low-level API with common arguments + kwargs = { + 'region_name': self.s3_region, + 'aws_access_key_id': self.ec2_creds.access, + 'aws_secret_access_key': self.ec2_creds.secret, + 'endpoint_url': self.s3_endpoint, + 'verify': self.cacert, + } + s3_client = boto3.client('s3', **kwargs) + s3 = boto3.resource('s3', **kwargs) + + # Create bucket + bucket_name = 'zaza-s3' + bucket = s3.Bucket(bucket_name) + bucket.create() + + # Validate its presence + bucket_list = s3_client.list_buckets() + logging.info(pprint.pformat(bucket_list)) + for bkt in bucket_list['Buckets']: + if bkt['Name'] == bucket_name: + break + else: + AssertionError('Bucket "{}" not found'.format(bucket_name)) + + # Delete bucket + bucket.delete() + + # Validate its absence + bucket_list = s3_client.list_buckets() + logging.info(pprint.pformat(bucket_list)) + for bkt in bucket_list['Buckets']: + assert bkt['Name'] != bucket_name diff --git a/zaza/openstack/charm_tests/tempest/__init__.py b/zaza/openstack/charm_tests/tempest/__init__.py new file mode 100644 index 0000000..cf4c994 --- /dev/null +++ b/zaza/openstack/charm_tests/tempest/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and running tempest.""" diff --git a/zaza/openstack/charm_tests/tempest/setup.py b/zaza/openstack/charm_tests/tempest/setup.py new file mode 100644 index 0000000..7ae0ba3 --- /dev/null +++ b/zaza/openstack/charm_tests/tempest/setup.py @@ -0,0 +1,343 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Code for configuring and initializing tempest.""" + +import jinja2 +import urllib.parse +import os +import subprocess + +import zaza.utilities.deployment_env as deployment_env +import zaza.openstack.utilities.juju as juju_utils +import zaza.openstack.utilities.openstack as openstack_utils +import zaza.openstack.charm_tests.tempest.utils as tempest_utils +import zaza.openstack.charm_tests.glance.setup as glance_setup + +SETUP_ENV_VARS = { + 'neutron': ['TEST_GATEWAY', 'TEST_CIDR_EXT', 'TEST_FIP_RANGE', + 'TEST_NAME_SERVER', 'TEST_CIDR_PRIV'], + 'swift': ['TEST_SWIFT_IP'], +} + +IGNORABLE_VARS = ['TEST_CIDR_PRIV'] + +TEMPEST_FLAVOR_NAME = 'm1.tempest' +TEMPEST_ALT_FLAVOR_NAME = 'm2.tempest' +TEMPEST_SVC_LIST = ['ceilometer', 'cinder', 'glance', 'heat', 'horizon', + 'ironic', 'neutron', 'nova', 'octavia', 'sahara', 'swift', + 'trove', 'zaqar'] + + +def add_application_ips(ctxt): + """Add application access IPs to context. + + :param ctxt: Context dictionary + :type ctxt: dict + :returns: None + :rtype: None + """ + ctxt['keystone'] = juju_utils.get_application_ip('keystone') + ctxt['dashboard'] = juju_utils.get_application_ip('openstack-dashboard') + ctxt['ncc'] = juju_utils.get_application_ip('nova-cloud-controller') + + +def add_nova_config(ctxt, keystone_session): + """Add nova config to context. + + :param ctxt: Context dictionary + :type ctxt: dict + :param keystone_session: keystoneauth1.session.Session object + :type: keystoneauth1.session.Session + :returns: None + :rtype: None + """ + nova_client = openstack_utils.get_nova_session_client( + keystone_session) + for flavor in nova_client.flavors.list(): + if flavor.name == TEMPEST_FLAVOR_NAME: + ctxt['flavor_ref'] = flavor.id + if flavor.name == TEMPEST_ALT_FLAVOR_NAME: + ctxt['flavor_ref_alt'] = flavor.id + + +def add_neutron_config(ctxt, keystone_session): + """Add neutron config to context. + + :param ctxt: Context dictionary + :type ctxt: dict + :param keystone_session: keystoneauth1.session.Session object + :type: keystoneauth1.session.Session + :returns: None + :rtype: None + """ + current_release = openstack_utils.get_os_release() + focal_ussuri = openstack_utils.get_os_release('focal_ussuri') + neutron_client = openstack_utils.get_neutron_session_client( + keystone_session) + net = neutron_client.find_resource("network", "ext_net") + ctxt['ext_net'] = net['id'] + router = neutron_client.find_resource("router", "provider-router") + ctxt['provider_router_id'] = router['id'] + # For focal+ with OVN, we use the same settings as upstream gate. + # This is because the l3_agent_scheduler extension is only + # applicable for OVN when conventional layer-3 agent enabled: + # https://docs.openstack.org/networking-ovn/2.0.1/features.html + # This enables test_list_show_extensions to run successfully. + if current_release >= focal_ussuri: + extensions = ('address-scope,agent,allowed-address-pairs,' + 'auto-allocated-topology,availability_zone,' + 'binding,default-subnetpools,external-net,' + 'extra_dhcp_opt,multi-provider,net-mtu,' + 'network_availability_zone,network-ip-availability,' + 'port-security,provider,quotas,rbac-address-scope,' + 'rbac-policies,standard-attr-revisions,security-group,' + 'standard-attr-description,subnet_allocation,' + 'standard-attr-tag,standard-attr-timestamp,trunk,' + 'quota_details,router,extraroute,ext-gw-mode,' + 'fip-port-details,pagination,sorting,project-id,' + 'dns-integration,qos') + ctxt['neutron_api_extensions'] = extensions + else: + ctxt['neutron_api_extensions'] = 'all' + + +def add_glance_config(ctxt, keystone_session): + """Add glance config to context. + + :param ctxt: Context dictionary + :type ctxt: dict + :param keystone_session: keystoneauth1.session.Session object + :type: keystoneauth1.session.Session + :returns: None + :rtype: None + """ + glance_client = openstack_utils.get_glance_session_client( + keystone_session) + image = openstack_utils.get_images_by_name( + glance_client, glance_setup.CIRROS_IMAGE_NAME) + image_alt = openstack_utils.get_images_by_name( + glance_client, glance_setup.CIRROS_ALT_IMAGE_NAME) + if image: + ctxt['image_id'] = image[0].id + if image_alt: + ctxt['image_alt_id'] = image_alt[0].id + + +def add_cinder_config(ctxt, keystone_session): + """Add cinder config to context. + + :param ctxt: Context dictionary + :type ctxt: dict + :param keystone_session: keystoneauth1.session.Session object + :type: keystoneauth1.session.Session + :returns: None + :rtype: None + """ + volume_types = ['volumev2', 'volumev3'] + keystone_client = openstack_utils.get_keystone_session_client( + keystone_session) + for volume_type in volume_types: + service = keystone_client.services.list(type=volume_type) + if service: + ctxt['catalog_type'] = volume_type + break + + +def add_keystone_config(ctxt, keystone_session): + """Add keystone config to context. + + :param ctxt: Context dictionary + :type ctxt: dict + :param keystone_session: keystoneauth1.session.Session object + :type: keystoneauth1.session.Session + :returns: None + :rtype: None + """ + keystone_client = openstack_utils.get_keystone_session_client( + keystone_session) + domain = keystone_client.domains.find(name="admin_domain") + ctxt['default_domain_id'] = domain.id + + +def add_octavia_config(ctxt): + """Add octavia config to context. + + :param ctxt: Context dictionary + :type ctxt: dict + :returns: None + :rtype: None + :raises: subprocess.CalledProcessError + """ + subprocess.check_call([ + 'curl', + "{}:80/swift/v1/fixtures/test_server.bin".format( + ctxt['test_swift_ip']), + '-o', "{}/test_server.bin".format(ctxt['workspace_path']) + ]) + subprocess.check_call([ + 'chmod', '+x', + "{}/test_server.bin".format(ctxt['workspace_path']) + ]) + + +def get_service_list(keystone_session): + """Retrieve list of services from keystone. + + :param keystone_session: keystoneauth1.session.Session object + :type: keystoneauth1.session.Session + :returns: None + :rtype: None + """ + keystone_client = openstack_utils.get_keystone_session_client( + keystone_session) + return [s.name for s in keystone_client.services.list() if s.enabled] + + +def add_environment_var_config(ctxt, services): + """Add environment variable config to context. + + :param ctxt: Context dictionary + :type ctxt: dict + :returns: None + :rtype: None + """ + deploy_env = deployment_env.get_deployment_context() + missing_vars = [] + for svc, env_vars in SETUP_ENV_VARS.items(): + if svc in services: + for var in env_vars: + value = deploy_env.get(var) + if value: + ctxt[var.lower()] = value + else: + if var not in IGNORABLE_VARS: + missing_vars.append(var) + if missing_vars: + raise ValueError( + ("Environment variables [{}] must all be set to run this" + " test").format(', '.join(missing_vars))) + + +def add_auth_config(ctxt): + """Add authorization config to context. + + :param ctxt: Context dictionary + :type ctxt: dict + :returns: None + :rtype: None + """ + overcloud_auth = openstack_utils.get_overcloud_auth() + ctxt['proto'] = urllib.parse.urlparse(overcloud_auth['OS_AUTH_URL']).scheme + ctxt['admin_username'] = overcloud_auth['OS_USERNAME'] + ctxt['admin_password'] = overcloud_auth['OS_PASSWORD'] + if overcloud_auth['API_VERSION'] == 3: + ctxt['admin_project_name'] = overcloud_auth['OS_PROJECT_NAME'] + ctxt['admin_domain_name'] = overcloud_auth['OS_DOMAIN_NAME'] + ctxt['default_credentials_domain_name'] = ( + overcloud_auth['OS_PROJECT_DOMAIN_NAME']) + + +def get_tempest_context(workspace_path): + """Generate the tempest config context. + + :returns: Context dictionary + :rtype: dict + """ + keystone_session = openstack_utils.get_overcloud_keystone_session() + ctxt = {} + ctxt['workspace_path'] = workspace_path + ctxt_funcs = { + 'nova': add_nova_config, + 'neutron': add_neutron_config, + 'glance': add_glance_config, + 'cinder': add_cinder_config, + 'keystone': add_keystone_config} + ctxt['enabled_services'] = get_service_list(keystone_session) + if set(['cinderv2', 'cinderv3']) \ + .intersection(set(ctxt['enabled_services'])): + ctxt['enabled_services'].append('cinder') + ctxt['disabled_services'] = list( + set(TEMPEST_SVC_LIST) - set(ctxt['enabled_services'])) + add_application_ips(ctxt) + for svc_name, ctxt_func in ctxt_funcs.items(): + if svc_name in ctxt['enabled_services']: + ctxt_func(ctxt, keystone_session) + add_environment_var_config(ctxt, ctxt['enabled_services']) + add_auth_config(ctxt) + if 'octavia' in ctxt['enabled_services']: + add_octavia_config(ctxt) + return ctxt + + +def render_tempest_config(target_file, ctxt, template_name): + """Render tempest config for specified config file and template. + + :param target_file: Name of file to render config to + :type target_file: str + :param ctxt: Context dictionary + :type ctxt: dict + :param template_name: Name of template file + :type template_name: str + :returns: None + :rtype: None + """ + jenv = jinja2.Environment(loader=jinja2.PackageLoader( + 'zaza.openstack', + 'charm_tests/tempest/templates')) + template = jenv.get_template(template_name) + with open(target_file, 'w') as f: + f.write(template.render(ctxt)) + + +def setup_tempest(tempest_template, accounts_template): + """Initialize tempest and render tempest config. + + :param tempest_template: tempest.conf template + :type tempest_template: module + :param accounts_template: accounts.yaml template + :type accounts_template: module + :returns: None + :rtype: None + """ + workspace_name, workspace_path = tempest_utils.get_workspace() + tempest_utils.destroy_workspace(workspace_name, workspace_path) + tempest_utils.init_workspace(workspace_path) + context = get_tempest_context(workspace_path) + render_tempest_config( + os.path.join(workspace_path, 'etc/tempest.conf'), + context, + tempest_template) + render_tempest_config( + os.path.join(workspace_path, 'etc/accounts.yaml'), + context, + accounts_template) + + +def render_tempest_config_keystone_v2(): + """Render tempest config for Keystone V2 API. + + :returns: None + :rtype: None + """ + setup_tempest('tempest_v2.j2', 'accounts.j2') + + +def render_tempest_config_keystone_v3(): + """Render tempest config for Keystone V3 API. + + :returns: None + :rtype: None + """ + setup_tempest('tempest_v3.j2', 'accounts.j2') diff --git a/zaza/openstack/charm_tests/tempest/templates/accounts.j2 b/zaza/openstack/charm_tests/tempest/templates/accounts.j2 new file mode 100644 index 0000000..c4dd21a --- /dev/null +++ b/zaza/openstack/charm_tests/tempest/templates/accounts.j2 @@ -0,0 +1,6 @@ +- username: 'demo' + tenant_name: 'demo' + password: 'pass' +- username: 'alt_demo' + tenant_name: 'alt_demo' + password: 'secret' diff --git a/zaza/openstack/charm_tests/tempest/templates/tempest_v2.j2 b/zaza/openstack/charm_tests/tempest/templates/tempest_v2.j2 new file mode 100644 index 0000000..d4b8810 --- /dev/null +++ b/zaza/openstack/charm_tests/tempest/templates/tempest_v2.j2 @@ -0,0 +1,102 @@ +[DEFAULT] +debug = false +use_stderr = false +log_file = tempest.log + +[auth] +test_accounts_file = accounts.yaml +default_credentials_domain_name = Default +admin_username = {{ admin_username }} +admin_project_name = admin +admin_password = {{ admin_password }} +admin_domain_name = Default + +{% if 'nova' in enabled_services %} +[compute] +image_ref = {{ image_id }} +image_ref_alt = {{ image_alt_id }} +flavor_ref = {{ flavor_ref }} +flavor_ref_alt = {{ flavor_ref_alt }} +region = RegionOne +min_compute_nodes = 3 + +# TODO: review this as its release specific +# min_microversion = 2.2 +# max_microversion = latest + +[compute-feature-enabled] +console_output = true +resize = true +live_migration = true +block_migration_for_live_migration = true +attach_encrypted_volume = false +{% endif %} + +{% if 'keystone' in enabled_services %} +[identity] +uri = {{ proto }}://{{ keystone }}:5000/v2.0 +auth_version = v2 +admin_role = Admin +region = RegionOne +disable_ssl_certificate_validation = true + +[identity-feature-enabled] +api_v2 = true +api_v3 = false +{% endif %} + +{% if 'glance' in enabled_services %} +[image] +http_image = http://{{ test_swift_ip }}:80/swift/v1/images/cirros-0.3.4-x86_64-uec.tar.gz +{% endif %} + +{% if 'neutron' in enabled_services %} +[network] +{% if test_cidr_priv %} +project_network_cidr = {{ test_cidr_priv }} +{% endif %} +public_network_id = {{ ext_net }} +dns_servers = {{ test_name_server }} +project_networks_reachable = false + +[network-feature-enabled] +ipv6 = false +{% endif %} + +{% if 'heat' in enabled_services %} +[orchestration] +stack_owner_role = Admin +instance_type = m1.small +keypair_name = testkey +{% endif %} + +[oslo_concurrency] +lock_path = /tmp + +[scenario] +img_dir = /home/ubuntu/images +img_file = cirros-0.3.4-x86_64-disk.img +img_container_format = bare +img_disk_format = qcow2 + +[validation] +run_validation = true +image_ssh_user = cirros + +[service_available] +{% for svc in enabled_services -%} +{{ svc }} = true +{% endfor -%} +{% for svc in disabled_services -%} +{{ svc }} = false +{% endfor %} + +{% if 'cinder' in enabled_services %} +[volume] +backend_names = cinder-ceph +storage_protocol = ceph +catalog_type = {{ catalog_type }} + +[volume-feature-enabled] +backup = false +{% endif %} diff --git a/zaza/openstack/charm_tests/tempest/templates/tempest_v3.j2 b/zaza/openstack/charm_tests/tempest/templates/tempest_v3.j2 new file mode 100644 index 0000000..5cb65ed --- /dev/null +++ b/zaza/openstack/charm_tests/tempest/templates/tempest_v3.j2 @@ -0,0 +1,113 @@ +[DEFAULT] +debug = false +use_stderr = false +log_file = tempest.log + +[auth] +default_credentials_domain_name = {{ default_credentials_domain_name }} +admin_username = {{ admin_username }} +admin_project_name = {{ admin_project_name }} +admin_password = {{ admin_password }} +admin_domain_name = {{ admin_domain_name }} + +{% if 'nova' in enabled_services %} +[compute] +image_ref = {{ image_id }} +image_ref_alt = {{ image_alt_id }} +flavor_ref = {{ flavor_ref }} +flavor_ref_alt = {{ flavor_ref_alt }} +min_compute_nodes = 3 + +# TODO: review this as its release specific +# min_microversion = 2.2 +# max_microversion = latest + +[compute-feature-enabled] +console_output = true +resize = true +live_migration = true +block_migration_for_live_migration = true +attach_encrypted_volume = false +{% endif %} + +{% if 'keystone' in enabled_services %} +[identity] +uri = {{ proto }}://{{ keystone }}:5000/v2.0 +uri_v3 = {{ proto }}://{{ keystone }}:5000/v3 +auth_version = v3 +admin_role = Admin +region = RegionOne +default_domain_id = {{ default_domain_id }} +admin_domain_scope = true +disable_ssl_certificate_validation = true + +[identity-feature-enabled] +api_v2 = false +api_v3 = true +{% endif %} + +{% if 'glance' in enabled_services %} +[image] +http_image = http://{{ test_swift_ip }}:80/swift/v1/images/cirros-0.3.4-x86_64-uec.tar.gz +{% endif %} + +{% if 'neutron' in enabled_services %} +[network] +{% if test_cidr_priv %} +project_network_cidr = {{ test_cidr_priv }} +{% endif %} +public_network_id = {{ ext_net }} +dns_servers = {{ test_name_server }} +project_networks_reachable = false +floating_network_name = {{ ext_net }} + +[network-feature-enabled] +ipv6 = false +api_extensions = {{ neutron_api_extensions }} +port_security = true +{% endif %} + +{% if 'heat' in enabled_services %} +[orchestration] +stack_owner_role = Admin +instance_type = m1.small +keypair_name = testkey +{% endif %} + +[oslo_concurrency] +lock_path = /tmp + +[scenario] +img_dir = /home/ubuntu/images +img_file = cirros-0.3.4-x86_64-disk.img +img_container_format = bare +img_disk_format = qcow2 + +[validation] +run_validation = true +image_ssh_user = cirros + +[service_available] +{% for svc in enabled_services -%} +{{ svc }} = true +{% endfor -%} +{% for svc in disabled_services -%} +{{ svc }} = false +{% endfor %} + +{% if 'cinder' in enabled_services %} +[volume] +backend_names = cinder-ceph +storage_protocol = ceph +catalog_type = {{ catalog_type }} + +[volume-feature-enabled] +backup = false +{% endif %} + +{% if 'octavia' in enabled_services %} +[load_balancer] +enable_security_groups = true +test_with_ipv6 = false +test_server_path = {{ workspace_path }}/test_server.bin +{% endif %} \ No newline at end of file diff --git a/zaza/openstack/charm_tests/tempest/tests.py b/zaza/openstack/charm_tests/tempest/tests.py new file mode 100644 index 0000000..979caff --- /dev/null +++ b/zaza/openstack/charm_tests/tempest/tests.py @@ -0,0 +1,86 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Code for running tempest tests.""" + +import os +import subprocess + +import zaza +import zaza.charm_lifecycle.utils +import zaza.charm_lifecycle.test +import zaza.openstack.charm_tests.tempest.utils as tempest_utils +import tempfile + + +class TempestTest(): + """Tempest test class.""" + + test_runner = zaza.charm_lifecycle.test.DIRECT + + def run(self): + """Run tempest tests as specified in tests/tests.yaml. + + Test keys are parsed from ['tests_options']['tempest']['model'], where + valid test keys are: smoke (bool), whitelist (list of tests), blacklist + (list of tests), regex (list of regex's), and keep-workspace (bool). + + :returns: Status of tempest run + :rtype: bool + """ + result = True + charm_config = zaza.charm_lifecycle.utils.get_charm_config() + workspace_name, workspace_path = tempest_utils.get_workspace() + tempest_options = ['tempest', 'run', '--workspace', + workspace_name, '--config', + os.path.join(workspace_path, 'etc/tempest.conf')] + for model_alias in zaza.model.get_juju_model_aliases().keys(): + tempest_test_key = model_alias + if model_alias == zaza.charm_lifecycle.utils.DEFAULT_MODEL_ALIAS: + tempest_test_key = 'default' + config = charm_config['tests_options']['tempest'][tempest_test_key] + smoke = config.get('smoke') + if smoke and smoke is True: + tempest_options.extend(['--smoke']) + if config.get('regex'): + tempest_options.extend( + ['--regex', + ' '.join([reg for reg in config.get('regex')])]) + if config.get('black-regex'): + tempest_options.extend( + ['--black-regex', + ' '.join([reg for reg in config.get('black-regex')])]) + with tempfile.TemporaryDirectory() as tmpdirname: + if config.get('whitelist'): + white_file = os.path.join(tmpdirname, 'white.cfg') + with open(white_file, 'w') as f: + f.write('\n'.join(config.get('whitelist'))) + f.write('\n') + tempest_options.extend(['--whitelist-file', white_file]) + if config.get('blacklist'): + black_file = os.path.join(tmpdirname, 'black.cfg') + with open(black_file, 'w') as f: + f.write('\n'.join(config.get('blacklist'))) + f.write('\n') + tempest_options.extend(['--blacklist-file', black_file]) + print(tempest_options) + try: + subprocess.check_call(tempest_options) + except subprocess.CalledProcessError: + result = False + break + keep_workspace = config.get('keep-workspace') + if not keep_workspace or keep_workspace is not True: + tempest_utils.destroy_workspace(workspace_name, workspace_path) + return result diff --git a/zaza/openstack/charm_tests/tempest/utils.py b/zaza/openstack/charm_tests/tempest/utils.py new file mode 100644 index 0000000..52ae126 --- /dev/null +++ b/zaza/openstack/charm_tests/tempest/utils.py @@ -0,0 +1,67 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utility code for working with tempest workspaces.""" + +import os +from pathlib import Path +import shutil +import subprocess + +import zaza.model as model + + +def get_workspace(): + """Get tempest workspace name and path. + + :returns: A tuple containing tempest workspace name and workspace path + :rtype: Tuple[str, str] + """ + home = str(Path.home()) + workspace_name = model.get_juju_model() + workspace_path = os.path.join(home, '.tempest', workspace_name) + return (workspace_name, workspace_path) + + +def destroy_workspace(workspace_name, workspace_path): + """Delete tempest workspace. + + :param workspace_name: name of workspace + :type workspace_name: str + :param workspace_path: directory path where workspace is stored + :type workspace_path: str + :returns: None + :rtype: None + """ + try: + subprocess.check_call(['tempest', 'workspace', 'remove', '--rmdir', + '--name', workspace_name]) + except (subprocess.CalledProcessError, FileNotFoundError): + pass + if os.path.isdir(workspace_path): + shutil.rmtree(workspace_path) + + +def init_workspace(workspace_path): + """Initialize tempest workspace. + + :param workspace_path: directory path where workspace is stored + :type workspace_path: str + :returns: None + :rtype: None + """ + try: + subprocess.check_call(['tempest', 'init', workspace_path]) + except subprocess.CalledProcessError: + pass diff --git a/zaza/openstack/charm_tests/test_utils.py b/zaza/openstack/charm_tests/test_utils.py index 88687c3..bc67933 100644 --- a/zaza/openstack/charm_tests/test_utils.py +++ b/zaza/openstack/charm_tests/test_utils.py @@ -11,22 +11,30 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Module containg base class for implementing charm tests.""" +"""Module containing base class for implementing charm tests.""" import contextlib import logging +import subprocess +import sys +import tenacity import unittest -import zaza.model +import yaml + +import novaclient import zaza.model as model import zaza.charm_lifecycle.utils as lifecycle_utils +import zaza.openstack.configure.guest as configure_guest import zaza.openstack.utilities.openstack as openstack_utils +import zaza.openstack.utilities.generic as generic_utils +import zaza.openstack.charm_tests.glance.setup as glance_setup def skipIfNotHA(service_name): """Run decorator to skip tests if application not in HA configuration.""" def _skipIfNotHA_inner_1(f): def _skipIfNotHA_inner_2(*args, **kwargs): - ips = zaza.model.get_app_ips( + ips = model.get_app_ips( service_name) if len(ips) > 1: return f(*args, **kwargs) @@ -38,6 +46,25 @@ def skipIfNotHA(service_name): return _skipIfNotHA_inner_1 +def skipUntilVersion(service, package, release): + """Run decorator to skip this test if application version is too low.""" + def _skipUntilVersion_inner_1(f): + def _skipUntilVersion_inner_2(*args, **kwargs): + package_version = generic_utils.get_pkg_version(service, package) + try: + subprocess.check_call(['dpkg', '--compare-versions', + package_version, 'ge', release], + stderr=subprocess.STDOUT, + universal_newlines=True) + return f(*args, **kwargs) + except subprocess.CalledProcessError: + logging.warn("Skipping test for older ({})" + "service {}, requested {}".format( + package_version, service, release)) + return _skipUntilVersion_inner_2 + return _skipUntilVersion_inner_1 + + def audit_assertions(action, expected_passes, expected_failures=None, @@ -49,7 +76,7 @@ def audit_assertions(action, :param expected_passes: List of test names that are expected to pass :type expected_passes: List[str] :param expected_failures: List of test names that are expected to fail - :type expexted_failures: List[str] + :type expected_failures: List[str] :raises: AssertionError if the assertion fails. """ if expected_failures is None: @@ -69,27 +96,174 @@ def audit_assertions(action, assert value == "PASS", "Unexpected failure: {}".format(key) -class OpenStackBaseTest(unittest.TestCase): - """Generic helpers for testing OpenStack API charms.""" +class BaseCharmTest(unittest.TestCase): + """Generic helpers for testing charms.""" + + run_resource_cleanup = False + + def resource_cleanup(self): + """Cleanup any resources created during the test run. + + Override this method with a method which removes any resources + which were created during the test run. If the test sets + "self.run_resource_cleanup = True" then cleanup will be + performed. + """ + pass + + # this must be a class instance method otherwise descentents will not be + # able to influence if cleanup should be run. + def tearDown(self): + """Run teardown for test class.""" + if self.run_resource_cleanup: + logging.info('Running resource cleanup') + self.resource_cleanup() @classmethod - def setUpClass(cls, application_name=None): - """Run setup for test class to create common resourcea.""" - cls.keystone_session = openstack_utils.get_overcloud_keystone_session() - cls.model_name = model.get_juju_model() - cls.test_config = lifecycle_utils.get_charm_config() + def setUpClass(cls, application_name=None, model_alias=None): + """Run setup for test class to create common resources. + + Note: the derived class may not use the application_name; if it's set + to None then this setUpClass() method will attempt to extract the + application name from the charm_config (essentially the test.yaml) + using the key 'charm_name' in the test_config. If that isn't present, + then there will be no application_name set, and this is considered a + generic scenario of a whole model rather than a particular charm under + test. + + :param application_name: the name of the applications that the derived + class is testing. If None, then it's a generic test not connected + to any single charm. + :type application_name: Optional[str] + :param model_alias: the alias to use if needed. + :type model_alias: Optional[str] + """ + cls.model_aliases = model.get_juju_model_aliases() + if model_alias: + cls.model_name = cls.model_aliases[model_alias] + else: + cls.model_name = model.get_juju_model() + cls.test_config = lifecycle_utils.get_charm_config(fatal=False) + if application_name: cls.application_name = application_name else: - cls.application_name = cls.test_config['charm_name'] + try: + charm_under_test_name = cls.test_config['charm_name'] + except KeyError: + logging.warning("No application_name and no charm config so " + "not setting the application_name. Likely a " + "scenario test.") + return + deployed_app_names = model.sync_deployed(model_name=cls.model_name) + if charm_under_test_name in deployed_app_names: + # There is an application named like the charm under test. + # Let's consider it the application under test: + cls.application_name = charm_under_test_name + else: + # Let's search for any application whose name starts with the + # name of the charm under test and assume it's the application + # under test: + for app_name in deployed_app_names: + if app_name.startswith(charm_under_test_name): + cls.application_name = app_name + break + else: + logging.warning('Could not find application under test') + return + cls.lead_unit = model.get_lead_unit_name( cls.application_name, model_name=cls.model_name) logging.debug('Leader unit is {}'.format(cls.lead_unit)) + def config_current_separate_non_string_type_keys( + self, non_string_type_keys, config_keys=None, + application_name=None): + """Obtain current config and the non-string type config separately. + + If the charm config option is not string, it will not accept being + reverted back in "config_change()" method if the current value is None. + Therefore, obtain the current config and separate those out, so they + can be used for a separate invocation of "config_change()" with + reset_to_charm_default set to True. + + :param config_keys: iterable of strs to index into the current config. + If None, return all keys from the config + :type config_keys: Optional[Iterable[str]] + :param non_string_type_keys: list of non-string type keys to be + separated out only if their current value + is None + :type non_string_type_keys: list + :param application_name: String application name for use when called + by a charm under test other than the object's + application. + :type application_name: Optional[str] + :return: Dictionary of current charm configs without the + non-string type keys provided, and dictionary of the + non-string keys found in the supplied config_keys list. + :rtype: Dict[str, Any], Dict[str, None] + """ + current_config = self.config_current(application_name, config_keys) + non_string_type_config = {} + if config_keys is None: + config_keys = list(current_config.keys()) + for key in config_keys: + # We only care if the current value is None, otherwise it will + # not face issues being reverted by "config_change()" + if key in non_string_type_keys and current_config[key] is None: + non_string_type_config[key] = None + current_config.pop(key) + + return current_config, non_string_type_config + + def config_current(self, application_name=None, keys=None): + """Get Current Config of an application normalized into key-values. + + :param application_name: String application name for use when called + by a charm under test other than the object's + application. + :type application_name: Optional[str] + :param keys: iterable of strs to index into the current config. If + None, return all keys from the config + :type keys: Optional[Iterable[str]] + :return: Dictionary of requested config from application + :rtype: Dict[str, Any] + """ + if not application_name: + application_name = self.application_name + + _app_config = model.get_application_config(application_name) + + keys = keys or _app_config.keys() + return { + k: _app_config.get(k, {}).get('value') + for k in keys + } + + @staticmethod + def _stringed_value_config(config): + """Stringify values in a dict. + + Workaround: + libjuju refuses to accept data with types other than strings + through the zaza.model.set_application_config + + :param config: Config dictionary with any typed values + :type config: Dict[str,Any] + :return: Config Dictionary with string-ly typed values + :rtype: Dict[str,str] + """ + # if v is None, stringify to '' + # otherwise use a strict cast with str(...) + return { + k: '' if v is None else str(v) + for k, v in config.items() + } + @contextlib.contextmanager def config_change(self, default_config, alternate_config, - application_name=None): + application_name=None, reset_to_charm_default=False): """Run change config tests. Change config to `alternate_config`, wait for idle workload status, @@ -109,20 +283,23 @@ class OpenStackBaseTest(unittest.TestCase): by a charm under test other than the object's application. :type application_name: str + :param reset_to_charm_default: When True we will ask Juju to reset each + configuration option mentioned in the + `alternate_config` dictionary back to + the charm default and ignore the + `default_config` dictionary. + :type reset_to_charm_default: bool """ if not application_name: application_name = self.application_name + # we need to compare config values to what is already applied before # attempting to set them. otherwise the model will behave differently # than we would expect while waiting for completion of the change - _app_config = model.get_application_config(application_name) - app_config = {} - # convert the more elaborate config structure from libjuju to something - # we can compare to what the caller supplies to this function - for k in alternate_config.keys(): - # note that conversion to string for all values is due to - # attempting to set any config with other types lead to Traceback - app_config[k] = str(_app_config.get(k, {}).get('value', '')) + app_config = self.config_current( + application_name, keys=alternate_config.keys() + ) + if all(item in app_config.items() for item in alternate_config.items()): logging.debug('alternate_config equals what is already applied ' @@ -139,7 +316,7 @@ class OpenStackBaseTest(unittest.TestCase): .format(alternate_config)) model.set_application_config( application_name, - alternate_config, + self._stringed_value_config(alternate_config), model_name=self.model_name) logging.debug( @@ -156,12 +333,28 @@ class OpenStackBaseTest(unittest.TestCase): yield - logging.debug('Restoring charm setting to {}'.format(default_config)) - model.set_application_config( - application_name, - default_config, - model_name=self.model_name) + if reset_to_charm_default: + logging.debug('Resetting these charm configuration options to the ' + 'charm default: "{}"' + .format(alternate_config.keys())) + model.reset_application_config(application_name, + list(alternate_config.keys()), + model_name=self.model_name) + elif default_config == alternate_config: + logging.debug('default_config == alternate_config, not attempting ' + ' to restore configuration') + return + else: + logging.debug('Restoring charm setting to {}' + .format(default_config)) + model.set_application_config( + application_name, + self._stringed_value_config(default_config), + model_name=self.model_name) + logging.debug( + 'Waiting for units to execute config-changed hook') + model.wait_for_agent_status(model_name=self.model_name) logging.debug( 'Waiting for units to reach target states') model.wait_for_application_states( @@ -170,12 +363,50 @@ class OpenStackBaseTest(unittest.TestCase): # TODO: Optimize with a block on a specific application until idle. model.block_until_all_units_idle() + def restart_on_changed_debug_oslo_config_file(self, config_file, services, + config_section='DEFAULT'): + """Check restart happens on config change by flipping debug mode. + + Change debug mode and assert that change propagates to the correct + file and that services are restarted as a result. config_file must be + an oslo config file and debug option must be set in the + `config_section` section. + + :param config_file: OSLO Config file to check for settings + :type config_file: str + :param services: Services expected to be restarted when config_file is + changed. + :type services: list + """ + # Expected default and alternate values + current_value = model.get_application_config( + self.application_name)['debug']['value'] + new_value = str(not bool(current_value)).title() + current_value = str(current_value).title() + + set_default = {'debug': current_value} + set_alternate = {'debug': new_value} + default_entry = {config_section: {'debug': [current_value]}} + alternate_entry = {config_section: {'debug': [new_value]}} + + # Make config change, check for service restarts + logging.info( + 'Changing settings on {} to {}'.format( + self.application_name, set_alternate)) + self.restart_on_changed( + config_file, + set_default, + set_alternate, + default_entry, + alternate_entry, + services) + def restart_on_changed(self, config_file, default_config, alternate_config, default_entry, alternate_entry, services, pgrep_full=False): """Run restart on change tests. - Test that changing config results in config file being updates and + Test that changing config results in config file being updated and services restarted. Return config to default_config afterwards :param config_file: Config file to check for settings @@ -233,7 +464,7 @@ class OpenStackBaseTest(unittest.TestCase): # If this is not an OSLO config file set default_config={} if default_entry: logging.debug( - 'Waiting for updates to propagate to '.format(config_file)) + 'Waiting for updates to propagate to {}'.format(config_file)) model.block_until_oslo_config_entries_match( self.application_name, config_file, @@ -249,8 +480,8 @@ class OpenStackBaseTest(unittest.TestCase): Pause and then resume a unit checking that services are in the required state after each action - :param services: Services expected to be restarted when config_file is - changed. + :param services: Services expected to be restarted when the unit is + paused/resumed. :type services: list :param pgrep_full: Should pgrep be used rather than pidof to identify a service. @@ -266,10 +497,10 @@ class OpenStackBaseTest(unittest.TestCase): self.lead_unit, 'active', model_name=self.model_name) - model.run_action( + generic_utils.assertActionRanOK(model.run_action( self.lead_unit, 'pause', - model_name=self.model_name) + model_name=self.model_name)) model.block_until_unit_wl_status( self.lead_unit, 'maintenance', @@ -282,10 +513,10 @@ class OpenStackBaseTest(unittest.TestCase): model_name=self.model_name, pgrep_full=pgrep_full) yield - model.run_action( + generic_utils.assertActionRanOK(model.run_action( self.lead_unit, 'resume', - model_name=self.model_name) + model_name=self.model_name)) model.block_until_unit_wl_status( self.lead_unit, 'active', @@ -297,3 +528,600 @@ class OpenStackBaseTest(unittest.TestCase): 'running', model_name=self.model_name, pgrep_full=pgrep_full) + + def get_my_tests_options(self, key, default=None): + """Retrieve tests_options for specific test. + + Prefix for key is built from dot-notated absolute path to calling + method or function. + + Example: + # In tests.yaml: + tests_options: + zaza.charm_tests.noop.tests.NoopTest.test_foo.key: true + # called from zaza.charm_tests.noop.tests.NoopTest.test_foo() + >>> get_my_tests_options('key') + True + + :param key: Suffix for tests_options key. + :type key: str + :param default: Default value to return if key is not found. + :type default: any + :returns: Value associated with key in tests_options. + :rtype: any + """ + # note that we need to do this in-line otherwise we would get the path + # to ourself. I guess we could create a common method that would go two + # frames back, but that would be kind of useless for anyone else than + # this method. + caller_path = [] + + # get path to module + caller_path.append(sys.modules[ + sys._getframe().f_back.f_globals['__name__']].__name__) + + # attempt to get class name + try: + caller_path.append( + sys._getframe().f_back.f_locals['self'].__class__.__name__) + except KeyError: + pass + + # get method or function name + caller_path.append(sys._getframe().f_back.f_code.co_name) + + return self.test_config.get('tests_options', {}).get( + '.'.join(caller_path + [key]), default) + + def get_applications_with_substring_in_name(self, substring): + """Get applications with substring in name. + + :param substring: String to search for in application names + :type substring: str + :returns: List of matching applictions + :rtype: List + """ + status = model.get_status().applications + applications = [] + for application in status.keys(): + if substring in application: + applications.append(application) + return applications + + def run_update_status_hooks(self, units): + """Run update status hooks on units. + + :param units: List of unit names or unit.entity_id + :type units: List[str] + :returns: None + :rtype: None + """ + for unit in units: + model.run_on_unit(unit, "hooks/update-status") + + +class OpenStackBaseTest(BaseCharmTest): + """Generic helpers for testing OpenStack API charms.""" + + @classmethod + def setUpClass(cls, application_name=None, model_alias=None): + """Run setup for test class to create common resources.""" + super(OpenStackBaseTest, cls).setUpClass(application_name, model_alias) + cls.keystone_session = openstack_utils.get_overcloud_keystone_session( + model_name=cls.model_name) + cls.cacert = openstack_utils.get_cacert() + cls.nova_client = ( + openstack_utils.get_nova_session_client(cls.keystone_session)) + + def resource_cleanup(self): + """Remove test resources.""" + try: + logging.info('Removing instances launched by test ({}*)' + .format(self.RESOURCE_PREFIX)) + for server in self.nova_client.servers.list(): + if server.name.startswith(self.RESOURCE_PREFIX): + openstack_utils.delete_resource( + self.nova_client.servers, + server.id, + msg="server") + except AssertionError as e: + # Resource failed to be removed within the expected time frame, + # log this fact and carry on. + logging.warning('Gave up waiting for resource cleanup: "{}"' + .format(str(e))) + except AttributeError: + # Test did not define self.RESOURCE_PREFIX, ignore. + pass + + def launch_guest(self, guest_name, userdata=None, use_boot_volume=False, + instance_key=None): + """Launch two guests to use in tests. + + Note that it is up to the caller to have set the RESOURCE_PREFIX class + variable prior to calling this method. + + Also note that this method will remove any already existing instance + with same name as what is requested. + + :param guest_name: Name of instance + :type guest_name: str + :param userdata: Userdata to attach to instance + :type userdata: Optional[str] + :param use_boot_volume: Whether to boot guest from a shared volume. + :type use_boot_volume: boolean + :param instance_key: Key to collect associated config data with. + :type instance_key: Optional[str] + :returns: Nova instance objects + :rtype: Server + """ + instance_key = instance_key or glance_setup.LTS_IMAGE_NAME + instance_name = '{}-{}'.format(self.RESOURCE_PREFIX, guest_name) + + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(3), + wait=tenacity.wait_exponential( + multiplier=1, min=2, max=10)): + with attempt: + old_instance_with_same_name = self.retrieve_guest( + instance_name) + if old_instance_with_same_name: + logging.info( + 'Removing already existing instance ({}) with ' + 'requested name ({})' + .format(old_instance_with_same_name.id, instance_name)) + openstack_utils.delete_resource( + self.nova_client.servers, + old_instance_with_same_name.id, + msg="server") + + return configure_guest.launch_instance( + instance_key, + vm_name=instance_name, + use_boot_volume=use_boot_volume, + userdata=userdata) + + def launch_guests(self, userdata=None): + """Launch two guests to use in tests. + + Note that it is up to the caller to have set the RESOURCE_PREFIX class + variable prior to calling this method. + + :param userdata: Userdata to attach to instance + :type userdata: Optional[str] + :returns: List of launched Nova instance objects + :rtype: List[Server] + """ + launched_instances = [] + for guest_number in range(1, 2+1): + launched_instances.append( + self.launch_guest( + guest_name='ins-{}'.format(guest_number), + userdata=userdata)) + return launched_instances + + def retrieve_guest(self, guest_name): + """Return guest matching name. + + :param nova_client: Nova client to use when checking status + :type nova_client: Nova client + :returns: the matching guest + :rtype: Union[novaclient.Server, None] + """ + try: + return self.nova_client.servers.find(name=guest_name) + except novaclient.exceptions.NotFound: + return None + + def retrieve_guests(self): + """Return test guests. + + Note that it is up to the caller to have set the RESOURCE_PREFIX class + variable prior to calling this method. + + :param nova_client: Nova client to use when checking status + :type nova_client: Nova client + :returns: the matching guest + :rtype: Union[novaclient.Server, None] + """ + instance_1 = self.retrieve_guest( + '{}-ins-1'.format(self.RESOURCE_PREFIX)) + instance_2 = self.retrieve_guest( + '{}-ins-1'.format(self.RESOURCE_PREFIX)) + return instance_1, instance_2 + + +class BaseDeferredRestartTest(BaseCharmTest): + """Check deferred restarts. + + Example of adding a deferred restart test:: + + class NeutronOVSDeferredRestartTest( + test_utils.BaseDeferredRestartTest): + + @classmethod + def setUpClass(cls): + super().setUpClass(application_name='neutron-openvswitch') + + def run_tests(self): + # Trigger a config change which triggers a deferred hook. + self.run_charm_change_hook_test('config-changed') + + # Trigger a package change which requires a restart + self.run_package_change_test( + 'openvswitch-switch', + 'openvswitch-switch') + + + NOTE: The test has been broken into various class methods which may require + specialisation if the charm being tested is not a standard OpenStack + charm e.g. `trigger_deferred_hook_via_charm` if the charm is not + an oslo config or does not have a debug option. + """ + + @classmethod + def setUpClass(cls, application_name): + """Run test setup. + + :param application_name: Name of application to run tests against. + :type application_name: str + """ + cls.application_name = application_name + super().setUpClass(application_name=cls.application_name) + + def check_status_message_is_clear(self): + """Check each units status message show no defeerred events.""" + # Check workload status no longer shows deferred restarts. + for unit in model.get_units(self.application_name): + model.block_until_unit_wl_message_match( + unit.entity_id, + 'Unit is ready') + model.block_until_all_units_idle() + + def check_clear_restarts(self): + """Clear and deferred restarts and check status. + + Clear and deferred restarts and then check the workload status message + for each unit. + """ + # Use action to run any deferred restarts + for unit in model.get_units(self.application_name): + logging.info("Running restart-services on {}".format( + unit.entity_id)) + model.run_action( + unit.entity_id, + 'restart-services', + action_params={'deferred-only': True}, + raise_on_failure=True) + + # Check workload status no longer shows deferred restarts. + self.check_status_message_is_clear() + + def clear_hooks(self): + """Clear and deferred hooks. + + Run any deferred hooks. + """ + # Use action to run any deferred restarts + for unit in model.get_units(self.application_name): + logging.info("Running run-deferred-hooks on {}".format( + unit.entity_id)) + model.run_action( + unit.entity_id, + 'run-deferred-hooks', + raise_on_failure=True) + + def check_clear_hooks(self): + """Clear deferred hooks and check status. + + Clear deferred hooks and then check the workload status message + for each unit. + """ + self.clear_hooks() + # Check workload status no longer shows deferred restarts. + self.check_status_message_is_clear() + + def run_show_deferred_events_action(self): + """Run show-deferred-events and return results. + + :returns: Data from action run + :rtype: Dict + """ + unit = model.get_units(self.application_name)[0] + action = model.run_action( + unit.entity_id, + 'show-deferred-events', + raise_on_failure=True) + return yaml.safe_load(action.data['results']['output']) + + def check_show_deferred_events_action_restart(self, test_service, + restart_reason): + """Check the output from the action to list deferred restarts. + + Run the action to list any deferred restarts and check it has entry for + the given service and reason. + + :param test_service: Service that should need a restart + :type test_service: str + :param restart_reason: The reason the action should list for the + service needing to be restarted. This can be a + substring. + :type restart_reason: str + """ + # Ensure that the deferred restart and cause are listed via action + logging.info( + ("Checking {} is marked as needing restart in " + "show-deferred-events action").format( + test_service)) + for event in self.run_show_deferred_events_action()['restarts']: + logging.info("{} in {} and {} in {}".format( + test_service, + event, + restart_reason, + event)) + if test_service in event and restart_reason in event: + break + else: + msg = 'No entry for restart of {} for reason {} found'.format( + test_service, + restart_reason) + raise Exception(msg) + + def check_show_deferred_events_action_hook(self, hook): + """Check the output from the action to list deferred eveents. + + Run the action to list any deferred events and check it has entry for + the given hook. + + :param hook: Hook or method name + :type hook: str + """ + # Ensure that the deferred restart and cause are listed via action + logging.info( + ("Checking {} is marked as skipped in " + "show-deferred-events action").format(hook)) + for event in self.run_show_deferred_events_action()['hooks']: + logging.info("{} in {}".format(hook, event)) + if hook in event: + break + else: + msg = '{} not found in {}'.format(hook, event) + raise Exception(msg) + + def check_show_deferred_restarts_wlm(self, test_service): + """Check the workload status message lists deferred restart. + + :param test_service: Service that should need a restart + :type test_service: str + """ + # Ensure that the deferred restarts are visible in Juju status + for unit in model.get_units(self.application_name): + # Just checking one example service should we be checking all? + logging.info( + ("Checking {} is marked as needing restart in workload " + "message of {}".format(test_service, unit.entity_id))) + assert test_service in unit.workload_status_message + + def check_deferred_hook_wlm(self, deferred_hook): + """Check the workload status message lists deferred event. + + :param deferred_hook: Hook or method name which should be showing as + deferred. + :type deferred_hook: str + """ + # Ensure that the deferred restarts are visible in Juju status + for unit in model.get_units(self.application_name): + logging.info( + ("Checking {} is marked as having deferred hook in workload " + "message".format(unit.entity_id))) + assert deferred_hook in unit.workload_status_message + + def get_new_config(self): + """Return the config key and new value to trigger a hook execution. + + NOTE: The implementation assumes the charm has a `debug` option and + If that is not true the derived class should override this + method. + :returns: Config key and new value + :rtype: (str, bool) + """ + app_config = model.get_application_config(self.application_name) + return 'debug', str(not app_config['debug']['value']) + + def set_new_config(self): + """Change applications charm config.""" + logging.info("Triggering deferred restart via config change") + config_key, new_value = self.get_new_config() + logging.info("Setting {}: {}".format(config_key, new_value)) + model.set_application_config( + self.application_name, + {config_key: new_value}) + return new_value + + def trigger_deferred_restart_via_charm(self, restart_config_file): + """Set charm config option which requires a service start. + + Set the charm debug option and wait for that change to be renderred in + applications config file. + + NOTE: The implementation assumes the restart_config_file in an oslo + config file. If that is not true the derived class should + override this method. + + :param restart_config_file: Config file that updated value is expected + in. + :type restart_config_file: str + """ + new_debug_value = self.set_new_config() + expected_contents = { + 'DEFAULT': { + 'debug': [new_debug_value]}} + logging.info("Waiting for debug to be {} in {}".format( + new_debug_value, + restart_config_file)) + model.block_until_oslo_config_entries_match( + self.application_name, + restart_config_file, + expected_contents) + logging.info("Waiting for units to be idle") + model.block_until_all_units_idle() + + def trigger_deferred_hook_via_charm(self, deferred_hook): + """Set charm config option which requires a service start. + + Set the charm debug option and wait for that change to be rendered in + applications config file. + + :param deferred_hook: Hook or method name which should be showing as + deferred. + :type deferred_hook: str + :returns: New config value + :rtype: Union[str, int, float] + """ + new_debug_value = self.set_new_config() + for unit in model.get_units(self.application_name): + logging.info('Waiting for {} to show deferred hook'.format( + unit.entity_id)) + model.block_until_unit_wl_message_match( + unit.entity_id, + status_pattern='.*{}.*'.format(deferred_hook)) + logging.info("Waiting for units to be idle") + model.block_until_all_units_idle() + return new_debug_value + + def trigger_deferred_restart_via_package(self, restart_package): + """Update a package which requires a service restart. + + :param restart_package: Package that will be changed to trigger a + service restart. + :type restart_package: str + """ + logging.info("Triggering deferred restart via package change") + # Test restart requested by package + for unit in model.get_units(self.application_name): + model.run_on_unit( + unit.entity_id, + ('dpkg-reconfigure {}; ' + 'JUJU_HOOK_NAME=update-status ./hooks/update-status').format( + restart_package)) + + def run_charm_change_restart_test(self, test_service, restart_config_file): + """Trigger a deferred restart by updating a config file via the charm. + + Trigger a hook in the charm which the charm will defer. + + :param test_service: Service that should need a restart + :type test_service: str + :param restart_config_file: Config file that updated value is expected + in. + :type restart_config_file: str + """ + self.trigger_deferred_restart_via_charm(restart_config_file) + + self.check_show_deferred_restarts_wlm(test_service) + self.check_show_deferred_events_action_restart( + test_service, + restart_config_file) + logging.info("Running restart action to clear deferred restarts") + self.check_clear_restarts() + + def run_charm_change_hook_test(self, deferred_hook): + """Trigger a deferred restart by updating a config file via the charm. + + :param deferred_hook: Hook or method name which should be showing as + defeerred. + :type deferred_hook: str + """ + self.trigger_deferred_hook_via_charm(deferred_hook) + + self.check_deferred_hook_wlm(deferred_hook) + self.check_show_deferred_events_action_hook(deferred_hook) + # Rerunning to flip config option back to previous value. + self.trigger_deferred_hook_via_charm(deferred_hook) + logging.info("Running restart action to clear deferred hooks") + # If there are a number of units in the application and restarts take + # time then another deferred hook can occur so do not block on a + # clear status message. + self.clear_hooks() + + def run_package_change_test(self, restart_package, restart_package_svc): + """Trigger a deferred restart by updating a package. + + Update a package which requires will add a deferred restart. + + :param restart_package: Package that will be changed to trigger a + service restart. + :type restart_package: str + :param restart_package_service: Service that will require a restart + after restart_package has changed. + :type restart_package_service: str + """ + self.trigger_deferred_restart_via_package(restart_package) + + self.check_show_deferred_restarts_wlm(restart_package_svc) + self.check_show_deferred_events_action_restart( + restart_package_svc, + 'Package update') + logging.info("Running restart action to clear deferred restarts") + self.check_clear_restarts() + + def run_tests(self): + """Run charm tests. should specify which tests to run. + + The charm test that implements this test should specify which tests to + run, for example: + + def run_tests(self): + # Trigger a config change which triggers a deferred hook. + self.run_charm_change_hook_test('config-changed') + + # Trigger a config change which requires a restart + self.run_charm_change_restart_test( + 'neutron-l3-agent', + '/etc/neutron/neutron.conf') + + # Trigger a package change which requires a restart + self.run_package_change_test( + 'openvswitch-switch', + 'openvswitch-switch') + """ + raise NotImplementedError + + def test_deferred_restarts(self): + """Run deferred restart tests.""" + app_config = model.get_application_config(self.application_name) + auto_restart_config_key = 'enable-auto-restarts' + if auto_restart_config_key not in app_config: + raise unittest.SkipTest("Deferred restarts not implemented") + + # Ensure auto restarts are off. + policy_file = '/etc/policy-rc.d/charm-{}.policy'.format( + self.application_name) + if app_config[auto_restart_config_key]['value']: + logging.info("Turning off auto restarts") + model.set_application_config( + self.application_name, {auto_restart_config_key: 'False'}) + logging.info("Waiting for {} to appear on units of {}".format( + policy_file, + self.application_name)) + model.block_until_file_has_contents( + self.application_name, + policy_file, + 'policy_requestor_name') + # The block_until_file_has_contents ensures the change we waiting + # for has happened, now just wait for any hooks to finish. + logging.info("Waiting for units to be idle") + model.block_until_all_units_idle() + else: + logging.info("Auto restarts already disabled") + + self.run_tests() + + # Finished so turn auto-restarts back on. + logging.info("Turning on auto restarts") + model.set_application_config( + self.application_name, {auto_restart_config_key: 'True'}) + model.block_until_file_missing( + self.application_name, + policy_file) + model.block_until_all_units_idle() + self.check_clear_hooks() diff --git a/zaza/openstack/charm_tests/trilio/__init__.py b/zaza/openstack/charm_tests/trilio/__init__.py new file mode 100644 index 0000000..d22e570 --- /dev/null +++ b/zaza/openstack/charm_tests/trilio/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of code for setting up and testing TrilioVault.""" diff --git a/zaza/openstack/charm_tests/trilio/setup.py b/zaza/openstack/charm_tests/trilio/setup.py new file mode 100644 index 0000000..11fe187 --- /dev/null +++ b/zaza/openstack/charm_tests/trilio/setup.py @@ -0,0 +1,178 @@ +#!/usr/bin/env python3 + +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Code for configuring Trilio.""" + +import logging +import os + +import boto3 + +import zaza.charm_lifecycle.utils as lifecycle_utils +import zaza.model as zaza_model +import zaza.openstack.utilities.juju as juju_utils +import zaza.openstack.utilities.generic as generic_utils +import zaza.openstack.utilities.openstack as openstack_utils + + +def nfs_setup(): + """Run setup for testing Trilio. + + Setup for testing Trilio is currently part of functional + tests. + """ + logging.info("Configuring NFS Server") + nfs_server_ip = zaza_model.get_app_ips("nfs-server-test-fixture")[0] + trilio_wlm_unit = zaza_model.get_first_unit_name("trilio-wlm") + + nfs_shares_conf = {"nfs-shares": "{}:/srv/testing".format(nfs_server_ip)} + logging.info("NFS share config: {}".format(nfs_shares_conf)) + _trilio_services = ["trilio-wlm", "trilio-data-mover"] + + conf_changed = False + for juju_service in _trilio_services: + app_config = zaza_model.get_application_config(juju_service) + if app_config["nfs-shares"] != nfs_shares_conf["nfs-shares"]: + logging.info("Updating nfs-shares config option") + zaza_model.set_application_config(juju_service, nfs_shares_conf) + conf_changed = True + + if conf_changed: + zaza_model.wait_for_agent_status() + # NOTE(jamespage): wlm-api service must be running in order + # to execute the setup actions + zaza_model.block_until_service_status( + unit_name=trilio_wlm_unit, + services=["wlm-api"], + target_status="active", + ) + + +def trust_setup(): + """Run setup Trilio trust setup.""" + logging.info("Executing create-cloud-admin-trust") + password = juju_utils.leader_get("keystone", "admin_passwd") + + generic_utils.assertActionRanOK( + zaza_model.run_action_on_leader( + "trilio-wlm", + "create-cloud-admin-trust", + raise_on_failure=True, + action_params={"password": password}, + ) + ) + + +def license_setup(): + """Run setup Trilio license setup.""" + logging.info("Executing create-license") + test_license = os.environ.get("TEST_TRILIO_LICENSE") + if test_license and os.path.exists(test_license): + zaza_model.attach_resource("trilio-wlm", + resource_name='license', + resource_path=test_license) + generic_utils.assertActionRanOK( + zaza_model.run_action_on_leader( + "trilio-wlm", "create-license", + raise_on_failure=True + ) + ) + + else: + logging.error("Unable to find Trilio License file") + + +def s3_setup(): + """Run setup of s3 options for Trilio.""" + session = openstack_utils.get_overcloud_keystone_session() + ks_client = openstack_utils.get_keystone_session_client( + session) + + # Get token data so we can glean our user_id and project_id + token_data = ks_client.tokens.get_token_data(session.get_token()) + project_id = token_data['token']['project']['id'] + user_id = token_data['token']['user']['id'] + + # Store URL to service providing S3 compatible API + for entry in token_data['token']['catalog']: + if entry['type'] == 's3': + for endpoint in entry['endpoints']: + if endpoint['interface'] == 'public': + s3_region = endpoint['region'] + s3_endpoint = endpoint['url'] + + # Create AWS compatible application credentials in Keystone + ec2_creds = ks_client.ec2.create(user_id, project_id) + cacert = openstack_utils.get_cacert() + kwargs = { + 'region_name': s3_region, + 'aws_access_key_id': ec2_creds.access, + 'aws_secret_access_key': ec2_creds.secret, + 'endpoint_url': s3_endpoint, + 'verify': cacert, + } + s3 = boto3.resource('s3', **kwargs) + + # Create bucket + bucket_name = 'zaza-trilio' + logging.info("Creating bucket: {}".format(bucket_name)) + bucket = s3.Bucket(bucket_name) + bucket.create() + + s3_config = { + 'tv-s3-secret-key': ec2_creds.secret, + 'tv-s3-access-key': ec2_creds.access, + 'tv-s3-region-name': s3_region, + 'tv-s3-bucket': bucket_name, + 'tv-s3-endpoint-url': s3_endpoint} + for app in ['trilio-wlm', 'trilio-data-mover']: + logging.info("Setting s3 config for {}".format(app)) + zaza_model.set_application_config(app, s3_config) + test_config = lifecycle_utils.get_charm_config(fatal=False) + states = test_config.get('target_deploy_status', {}) + states['trilio-wlm'] = { + 'workload-status': 'blocked', + 'workload-status-message': 'application not trusted'} + zaza_model.wait_for_application_states( + states=test_config.get('target_deploy_status', {}), + timeout=7200) + zaza_model.block_until_all_units_idle() + + +def basic_setup(): + """Run basic setup for Trilio apps.""" + backup_target_type = zaza_model.get_application_config( + 'trilio-wlm')['backup-target-type']['value'] + if backup_target_type == "nfs": + nfs_setup() + if backup_target_type in ["s3", "experimental-s3"]: + s3_setup() + trust_setup() + license_setup() + + +def python2_workaround(): + """Workaround for Bug #1915914. + + Trilio code currently has a bug which assumes an executable called 'python' + will be on the path. To workaround this install a package which adds a + symlink to python + """ + for unit in zaza_model.get_units('trilio-wlm'): + zaza_model.run_on_unit( + unit.entity_id, + ("apt install --yes python-is-python3; " + "systemctl restart wlm\\*.service")) diff --git a/zaza/openstack/charm_tests/trilio/tests.py b/zaza/openstack/charm_tests/trilio/tests.py new file mode 100644 index 0000000..6fb7692 --- /dev/null +++ b/zaza/openstack/charm_tests/trilio/tests.py @@ -0,0 +1,496 @@ +#!/usr/bin/env python3 + +# Copyright 2018 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of tests for vault.""" + +import logging +import tenacity + +import zaza.model as zaza_model + +import zaza.openstack.charm_tests.glance.setup as glance_setup +import zaza.openstack.charm_tests.test_utils as test_utils +import zaza.openstack.configure.guest as guest_utils +import zaza.openstack.utilities.openstack as openstack_utils +import zaza.openstack.utilities.generic as generic_utils +from zaza.utilities import juju as juju_utils + + +def _resource_reaches_status( + unit, auth_args, status_command, full_status_command, resource_id, + target_status +): + """Wait for a workload resource to reach a status. + + :param unit: unit to run cli commands on + :type unit: zaza_model.Unit + :param auth_args: authentication arguments for command + :type auth_args: str + :param status_command: command to execute to get the resource status that + is expected to reach target_status + :type status_command: str + :param full_status_command: command to execute to get insights on why the + resource failed to reach target_status + :type full_status_command: str + :param resource_id: resource ID to monitor + :type resource_id: str + :param target_status: status to monitor for + :type target_status: str + """ + resource_status = ( + juju_utils.remote_run( + unit, + remote_cmd=status_command.format( + auth_args=auth_args, resource_id=resource_id + ), + timeout=180, + fatal=True, + ) + .strip() + .split("\n")[-1] + ) + logging.info( + "Checking resource ({}) status: {}".format( + resource_id, resource_status + ) + ) + if resource_status == target_status: + return + + full_resource_status = ( + juju_utils.remote_run( + unit, + remote_cmd=full_status_command.format( + auth_args=auth_args, resource_id=resource_id + ), + timeout=180, + fatal=True, + ) + .strip() + ) + + raise Exception("Resource not ready:\n{}".format(full_resource_status)) + + +class WorkloadmgrCLIHelper(object): + """Helper for working with workloadmgrcli.""" + + WORKLOAD_CREATE_CMD = ( + "openstack {auth_args} workload create " + "--instance instance-id={instance_id} " + "-f value -c ID" + ) + + WORKLOAD_STATUS_CMD = ( + "openstack {auth_args} workload show " + "-f value -c status " + "{resource_id}" + ) + + WORKLOAD_FULL_STATUS_CMD = ( + "openstack {auth_args} workload show " + "{resource_id}" + ) + + SNAPSHOT_CMD = ( + "openstack {auth_args} workload snapshot --full {workload_id}" + ) + + SNAPSHOT_ID_CMD = ( + "openstack {auth_args} workload snapshot list " + "--workload_id {workload_id} " + "-f value -c ID" + ) + + SNAPSHOT_STATUS_CMD = ( + "openstack {auth_args} workload snapshot show " + "-f value -c status " + "{resource_id}" + ) + + SNAPSHOT_FULL_STATUS_CMD = ( + "openstack {auth_args} workload snapshot show " + "{resource_id}" + ) + + ONECLICK_RESTORE_CMD = ( + "openstack {auth_args} workload snapshot oneclick-restore " + "{snapshot_id} " + ) + + RESTORE_LIST_CMD = ( + "openstack {auth_args} workloadmgr restore list " + "--snapshot_id {snapshot_id} " + "-f value -c ID" + ) + + RESTORE_STATUS_CMD = ( + "openstack {auth_args} workloadmgr restore show " + "-f value -c status " + "{resource_id}" + ) + + RESTORE_FULL_STATUS_CMD = ( + "openstack {auth_args} workloadmgr restore show " + "{resource_id}" + ) + + def __init__(self, keystone_client): + """Initialise helper. + + :param keystone_client: keystone client + :type keystone_client: keystoneclient.v3 + """ + self.trilio_wlm_unit = zaza_model.get_first_unit_name( + "trilio-wlm" + ) + self.auth_args = self._auth_arguments(keystone_client) + + @classmethod + def _auth_arguments(cls, keystone_client): + """Generate workloadmgrcli arguments for cloud authentication. + + :returns: string of required cli arguments for authentication + :rtype: str + """ + overcloud_auth = openstack_utils.get_overcloud_auth() + overcloud_auth.update( + { + "OS_DOMAIN_ID": openstack_utils.get_domain_id( + keystone_client, domain_name="admin_domain" + ), + "OS_TENANT_ID": openstack_utils.get_project_id( + keystone_client, + project_name="admin", + domain_name="admin_domain", + ), + "OS_TENANT_NAME": "admin", + } + ) + + _required_keys = [ + "OS_AUTH_URL", + "OS_USERNAME", + "OS_PASSWORD", + "OS_REGION_NAME", + "OS_DOMAIN_ID", + "OS_TENANT_ID", + "OS_TENANT_NAME", + ] + + params = [] + for os_key in _required_keys: + params.append( + "--{}={}".format( + os_key.lower().replace("_", "-"), + overcloud_auth[os_key], + ) + ) + return " ".join(params) + + def create_workload(self, instance_id): + """Create a new workload. + + :param instance_id: instance ID to create workload from + :type instance_id: str + :returns: workload ID + :rtype: str + """ + workload_id = juju_utils.remote_run( + self.trilio_wlm_unit, + remote_cmd=self.WORKLOAD_CREATE_CMD.format( + auth_args=self.auth_args, instance_id=instance_id + ), + timeout=180, + fatal=True, + ).strip() + + retryer = tenacity.Retrying( + wait=tenacity.wait_exponential(multiplier=1, max=30), + stop=tenacity.stop_after_delay(180), + reraise=True, + ) + retryer( + _resource_reaches_status, + self.trilio_wlm_unit, + self.auth_args, + self.WORKLOAD_STATUS_CMD, + self.WORKLOAD_FULL_STATUS_CMD, + workload_id, + "available", + ) + + return workload_id + + def create_snapshot(self, workload_id): + """Create a new snapshot. + + :param workload_id: workload ID to create snapshot from + :type workload_id: str + :returns: snapshot ID + :rtype: str + """ + juju_utils.remote_run( + self.trilio_wlm_unit, + remote_cmd=self.SNAPSHOT_CMD.format( + auth_args=self.auth_args, workload_id=workload_id + ), + timeout=180, + fatal=True, + ) + snapshot_id = juju_utils.remote_run( + self.trilio_wlm_unit, + remote_cmd=self.SNAPSHOT_ID_CMD.format( + auth_args=self.auth_args, workload_id=workload_id + ), + timeout=180, + fatal=True, + ).strip() + + retryer = tenacity.Retrying( + wait=tenacity.wait_exponential(multiplier=1, max=30), + stop=tenacity.stop_after_delay(900), + reraise=True, + ) + + retryer( + _resource_reaches_status, + self.trilio_wlm_unit, + self.auth_args, + self.SNAPSHOT_STATUS_CMD, + self.SNAPSHOT_FULL_STATUS_CMD, + snapshot_id, + "available", + ) + + return snapshot_id + + def oneclick_restore(self, snapshot_id): + """Restore a workload from a snapshot. + + :param snapshot_id: snapshot ID to restore + :type snapshot_id: str + """ + juju_utils.remote_run( + self.trilio_wlm_unit, + remote_cmd=self.ONECLICK_RESTORE_CMD.format( + auth_args=self.auth_args, snapshot_id=snapshot_id + ), + timeout=180, + fatal=True, + ) + restore_id = juju_utils.remote_run( + self.trilio_wlm_unit, + remote_cmd=self.RESTORE_LIST_CMD.format( + auth_args=self.auth_args, snapshot_id=snapshot_id + ), + timeout=180, + fatal=True, + ).strip() + + retryer = tenacity.Retrying( + wait=tenacity.wait_exponential(multiplier=1, max=30), + stop=tenacity.stop_after_delay(720), + reraise=True, + ) + + retryer( + _resource_reaches_status, + self.trilio_wlm_unit, + self.auth_args, + self.RESTORE_STATUS_CMD, + self.RESTORE_FULL_STATUS_CMD, + restore_id, + "available", + ) + + return restore_id + + +class TrilioBaseTest(test_utils.OpenStackBaseTest): + """Base test class for charms.""" + + RESOURCE_PREFIX = "zaza-triliovault-tests" + conf_file = None + + @classmethod + def setUpClass(cls): + """Run class setup for running tests.""" + super().setUpClass(application_name=cls.application_name) + cls.cinder_client = openstack_utils.get_cinder_session_client( + cls.keystone_session + ) + cls.nova_client = openstack_utils.get_nova_session_client( + cls.keystone_session + ) + cls.keystone_client = openstack_utils.get_keystone_session_client( + cls.keystone_session + ) + + def test_restart_on_config_change(self): + """Check restart happens on config change. + + Change debug mode and assert that change propagates to the correct + file and that services are restarted as a result + """ + # Expected default and alternate values + set_default = {"debug": False} + set_alternate = {"debug": True} + + # Make config change, check for service restarts + self.restart_on_changed( + self.conf_file, + set_default, + set_alternate, + {"DEFAULT": {"debug": ["False"]}}, + {"DEFAULT": {"debug": ["True"]}}, + self.services, + ) + + def test_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped then resume and check + they are started + """ + with self.pause_resume(self.services, pgrep_full=False): + logging.info("Testing pause resume") + + def test_snapshot_workload(self): + """Ensure that a workload can be created and snapshot'ed.""" + # Setup volume and instance and attach one to the other + volume = openstack_utils.create_volume( + self.cinder_client, + size="1", + name="{}-100-vol".format(self.RESOURCE_PREFIX), + ) + + instance = guest_utils.launch_instance( + glance_setup.CIRROS_IMAGE_NAME, + vm_name="{}-server".format(self.RESOURCE_PREFIX), + ) + + # Trilio need direct access to ceph - OMG + openstack_utils.attach_volume( + self.nova_client, volume.id, instance.id + ) + + workloadmgrcli = WorkloadmgrCLIHelper(self.keystone_client) + + # Create workload using instance + logging.info("Creating workload configuration") + workload_id = workloadmgrcli.create_workload(instance.id) + logging.info("Created workload: {}".format(workload_id)) + + logging.info("Initiating snapshot") + snapshot_id = workloadmgrcli.create_snapshot(workload_id) + logging.info( + "Snapshot of workload {} created: {}".format( + workload_id, snapshot_id + ) + ) + + logging.info("Deleting server and volume ready for restore") + openstack_utils.delete_resource( + self.nova_client.servers, instance.id, "deleting instance" + ) + # NOTE: Trilio leaves a snapshot in place - + # drop before volume deletion. + for ( + volume_snapshot + ) in self.cinder_client.volume_snapshots.list(): + openstack_utils.delete_resource( + self.cinder_client.volume_snapshots, + volume_snapshot.id, + "deleting snapshot", + ) + openstack_utils.delete_resource( + self.cinder_client.volumes, volume.id, "deleting volume" + ) + + logging.info("Initiating restore") + workloadmgrcli.oneclick_restore(snapshot_id) + + +class TrilioGhostNFSShareTest(TrilioBaseTest): + """Tests for Trilio charms providing the ghost-share action.""" + + def test_ghost_nfs_share(self): + """Ensure ghost-share action bind mounts NFS share.""" + generic_utils.assertActionRanOK(zaza_model.run_action( + self.lead_unit, + 'ghost-share', + action_params={ + 'nfs-shares': '10.20.0.1:/srv/testing' + }, + model_name=self.model_name) + ) + + +class TrilioWLMBaseTest(TrilioBaseTest): + """Tests for Trilio Workload Manager charm.""" + + conf_file = "/etc/workloadmgr/workloadmgr.conf" + application_name = "trilio-wlm" + + services = [ + "workloadmgr-api", + "workloadmgr-scheduler", + "workloadmgr-workloads", + "workloadmgr-cron", + ] + + +class TrilioDMAPITest(TrilioBaseTest): + """Tests for Trilio Data Mover API charm.""" + + conf_file = "/etc/dmapi/dmapi.conf" + application_name = "trilio-dm-api" + + services = ["dmapi-api"] + + +class TrilioDataMoverBaseTest(TrilioBaseTest): + """Tests for Trilio Data Mover charm.""" + + conf_file = "/etc/tvault-contego/tvault-contego.conf" + application_name = "trilio-data-mover" + + services = ["tvault-contego"] + + +class TrilioDataMoverNFSTest(TrilioDataMoverBaseTest, TrilioGhostNFSShareTest): + """Tests for Trilio Data Mover charm backed by NFS.""" + + application_name = "trilio-data-mover" + + +class TrilioDataMoverS3Test(TrilioDataMoverBaseTest): + """Tests for Trilio Data Mover charm backed by S3.""" + + application_name = "trilio-data-mover" + + +class TrilioWLMNFSTest(TrilioWLMBaseTest, TrilioGhostNFSShareTest): + """Tests for Trilio WLM charm backed by NFS.""" + + application_name = "trilio-wlm" + + +class TrilioWLMS3Test(TrilioWLMBaseTest): + """Tests for Trilio WLM charm backed by S3.""" + + application_name = "trilio-wlm" diff --git a/zaza/openstack/charm_tests/vault/setup.py b/zaza/openstack/charm_tests/vault/setup.py index 37bd332..c792508 100644 --- a/zaza/openstack/charm_tests/vault/setup.py +++ b/zaza/openstack/charm_tests/vault/setup.py @@ -14,7 +14,9 @@ """Run configuration phase.""" +import base64 import functools +import logging import requests import tempfile @@ -23,6 +25,25 @@ import zaza.openstack.charm_tests.vault.utils as vault_utils import zaza.model import zaza.openstack.utilities.cert import zaza.openstack.utilities.openstack +import zaza.openstack.utilities.generic +import zaza.openstack.utilities.exceptions as zaza_exceptions +import zaza.utilities.juju as juju_utils + + +def get_cacert_file(): + """Retrieve CA cert used for vault endpoints and write to file. + + :returns: Path to file with CA cert. + :rtype: str + """ + cacert_file = None + vault_config = zaza.model.get_application_config('vault') + cacert_b64 = vault_config['ssl-ca']['value'] + if cacert_b64: + with tempfile.NamedTemporaryFile(mode='wb', delete=False) as fp: + fp.write(base64.b64decode(cacert_b64)) + cacert_file = fp.name + return cacert_file def basic_setup(cacert=None, unseal_and_authorize=False): @@ -33,29 +54,90 @@ def basic_setup(cacert=None, unseal_and_authorize=False): :param unseal_and_authorize: Whether to unseal and authorize vault. :type unseal_and_authorize: bool """ - clients = vault_utils.get_clients(cacert=cacert) - vip_client = vault_utils.get_vip_client(cacert=cacert) - if vip_client: - unseal_client = vip_client - else: - unseal_client = clients[0] - initialized = vault_utils.is_initialized(unseal_client) - # The credentials are written to a file to allow the tests to be re-run - # this is mainly useful for manually working on the tests. - if initialized: - vault_creds = vault_utils.get_credentails() - else: - vault_creds = vault_utils.init_vault(unseal_client) - vault_utils.store_credentails(vault_creds) - - # For use by charms or bundles other than vault + cacert = cacert or get_cacert_file() + vault_svc = vault_utils.VaultFacade(cacert=cacert) if unseal_and_authorize: - vault_utils.unseal_all(clients, vault_creds['keys'][0]) - vault_utils.auth_all(clients, vault_creds['root_token']) - vault_utils.run_charm_authorize(vault_creds['root_token']) + vault_svc.unseal() + vault_svc.authorize() -def auto_initialize(cacert=None, validation_application='keystone'): +def basic_setup_and_unseal(cacert=None): + """Initialize (if needed) and unseal vault. + + :param cacert: Path to CA cert used for vaults api cert. + :type cacert: str + """ + cacert = cacert or get_cacert_file() + vault_svc = vault_utils.VaultFacade(cacert=cacert) + vault_svc.unseal() + for unit in zaza.model.get_units('vault'): + zaza.model.run_on_unit(unit.name, './hooks/update-status') + + +async def mojo_or_default_unseal_by_unit(): + """Unseal any units reported as sealed using a cacert. + + The mojo cacert is tried first, and if that doesn't exist, then the default + zaza located cacert is used. + """ + try: + await mojo_unseal_by_unit() + except zaza_exceptions.CACERTNotFound: + await unseal_by_unit() + + +def mojo_unseal_by_unit(): + """Unseal any units reported as sealed using mojo cacert.""" + cacert = zaza.openstack.utilities.generic.get_mojo_cacert_path() + unseal_by_unit(cacert) + + +def unseal_by_unit(cacert=None): + """Unseal any units reported as sealed using mojo cacert.""" + cacert = cacert or get_cacert_file() + vault_creds = vault_utils.get_credentails() + for client in vault_utils.get_clients(cacert=cacert): + if client.hvac_client.is_sealed(): + client.hvac_client.unseal(vault_creds['keys'][0]) + unit_name = juju_utils.get_unit_name_from_ip_address( + client.addr, + 'vault') + zaza.model.run_on_unit(unit_name, './hooks/update-status') + + +async def async_mojo_or_default_unseal_by_unit(): + """Unseal any units reported as sealed using a cacert. + + The mojo cacert is tried first, and if that doesn't exist, then the default + zaza located cacert is used. + """ + try: + await async_mojo_unseal_by_unit() + except zaza_exceptions.CACERTNotFound: + await async_unseal_by_unit() + + +async def async_mojo_unseal_by_unit(): + """Unseal any units reported as sealed using mojo cacert.""" + cacert = zaza.openstack.utilities.generic.get_mojo_cacert_path() + await async_unseal_by_unit(cacert) + + +async def async_unseal_by_unit(cacert=None): + """Unseal any units reported as sealed using vault cacert.""" + cacert = cacert or get_cacert_file() + vault_creds = vault_utils.get_credentails() + for client in vault_utils.get_clients(cacert=cacert): + if client.hvac_client.is_sealed(): + client.hvac_client.unseal(vault_creds['keys'][0]) + unit_name = await juju_utils.async_get_unit_name_from_ip_address( + client.addr, + 'vault') + await zaza.model.async_run_on_unit( + unit_name, './hooks/update-status') + + +def auto_initialize(cacert=None, validation_application='keystone', wait=True): """Auto initialize vault for testing. Generate a csr and uploading a signed certificate. @@ -70,6 +152,7 @@ def auto_initialize(cacert=None, validation_application='keystone'): :returns: None :rtype: None """ + logging.info('Running auto_initialize') basic_setup(cacert=cacert, unseal_and_authorize=True) action = vault_utils.run_get_csr() @@ -87,20 +170,33 @@ def auto_initialize(cacert=None, validation_application='keystone'): root_ca=cacertificate, allowed_domains='openstack.local') + if wait: + zaza.model.wait_for_agent_status() + test_config = lifecycle_utils.get_charm_config(fatal=False) + zaza.model.wait_for_application_states( + states=test_config.get('target_deploy_status', {}), + timeout=7200) + if validation_application: validate_ca(cacertificate, application=validation_application) # Once validation has completed restart nova-compute to work around # bug #1826382 - try: - cmd = 'systemctl restart nova-compute' - for unit in zaza.model.get_units('nova-compute'): - result = zaza.model.run_on_unit(unit.entity_id, cmd) - assert int(result['Code']) == 0, ( - 'Restart of nova-compute on {} failed'.format( - unit.entity_id)) - except KeyError: - # Nothing todo if there are no nova-compute units - pass + cmd_map = { + 'nova-cloud-controller': ('systemctl restart ' + 'nova-scheduler nova-conductor'), + 'nova-compute': 'systemctl restart nova-compute', + } + for app in ('nova-compute', 'nova-cloud-controller',): + try: + for unit in zaza.model.get_units(app): + result = zaza.model.run_on_unit( + unit.entity_id, cmd_map[app]) + assert int(result['Code']) == 0, ( + 'Restart of services on {} failed'.format( + unit.entity_id)) + except KeyError: + # Nothing todo if there are no app units + pass auto_initialize_no_validation = functools.partial( @@ -108,6 +204,12 @@ auto_initialize_no_validation = functools.partial( validation_application=None) +auto_initialize_no_validation_no_wait = functools.partial( + auto_initialize, + validation_application=None, + wait=False) + + def validate_ca(cacertificate, application="keystone", port=5000): """Validate Certificate Authority against application. @@ -120,13 +222,9 @@ def validate_ca(cacertificate, application="keystone", port=5000): :returns: None :rtype: None """ - zaza.model.block_until_file_has_contents( + zaza.openstack.utilities.openstack.block_until_ca_exists( application, - zaza.openstack.utilities.openstack.KEYSTONE_REMOTE_CACERT, cacertificate.decode().strip()) - test_config = lifecycle_utils.get_charm_config() - zaza.model.wait_for_application_states( - states=test_config.get('target_deploy_status', {})) vip = (zaza.model.get_application_config(application) .get("vip").get("value")) if vip: diff --git a/zaza/openstack/charm_tests/vault/tests.py b/zaza/openstack/charm_tests/vault/tests.py index dfd4cc2..a83440f 100644 --- a/zaza/openstack/charm_tests/vault/tests.py +++ b/zaza/openstack/charm_tests/vault/tests.py @@ -16,6 +16,9 @@ """Collection of tests for vault.""" +import contextlib +import json +import logging import unittest import uuid import tempfile @@ -30,6 +33,7 @@ import zaza.openstack.charm_tests.vault.utils as vault_utils import zaza.openstack.utilities.cert import zaza.openstack.utilities.openstack import zaza.model +import zaza.utilities.juju as juju_utils @tenacity.retry( @@ -47,12 +51,15 @@ def retry_hvac_client_authenticated(client): return client.hvac_client.is_authenticated() -class BaseVaultTest(unittest.TestCase): +class BaseVaultTest(test_utils.OpenStackBaseTest): """Base class for vault tests.""" @classmethod def setUpClass(cls): """Run setup for Vault tests.""" + cls.model_name = zaza.model.get_juju_model() + cls.lead_unit = zaza.model.get_lead_unit_name( + "vault", model_name=cls.model_name) cls.clients = vault_utils.get_clients() cls.vip_client = vault_utils.get_vip_client() if cls.vip_client: @@ -62,6 +69,43 @@ class BaseVaultTest(unittest.TestCase): vault_utils.auth_all(cls.clients, cls.vault_creds['root_token']) vault_utils.ensure_secret_backend(cls.clients[0]) + def tearDown(self): + """Tun test cleanup for Vault tests.""" + vault_utils.unseal_all(self.clients, self.vault_creds['keys'][0]) + + @contextlib.contextmanager + def pause_resume(self, services, pgrep_full=False): + """Override pause_resume for Vault behavior.""" + zaza.model.block_until_service_status( + self.lead_unit, + services, + 'running', + model_name=self.model_name) + zaza.model.block_until_unit_wl_status( + self.lead_unit, + 'active', + model_name=self.model_name) + zaza.model.block_until_all_units_idle(model_name=self.model_name) + zaza.model.run_action( + self.lead_unit, + 'pause', + model_name=self.model_name) + zaza.model.block_until_service_status( + self.lead_unit, + services, + 'blocked', # Service paused + model_name=self.model_name) + yield + zaza.model.run_action( + self.lead_unit, + 'resume', + model_name=self.model_name) + zaza.model.block_until_service_status( + self.lead_unit, + services, + 'blocked', # Service sealed + model_name=self.model_name) + class UnsealVault(BaseVaultTest): """Unseal Vault only. @@ -84,7 +128,11 @@ class UnsealVault(BaseVaultTest): vault_utils.run_charm_authorize(self.vault_creds['root_token']) if not test_config: test_config = lifecycle_utils.get_charm_config() - del test_config['target_deploy_status']['vault'] + try: + del test_config['target_deploy_status']['vault'] + except KeyError: + # Already removed + pass zaza.model.wait_for_application_states( states=test_config.get('target_deploy_status', {})) @@ -127,19 +175,31 @@ class VaultTest(BaseVaultTest): allowed_domains='openstack.local') test_config = lifecycle_utils.get_charm_config() - del test_config['target_deploy_status']['vault'] - zaza.model.block_until_file_has_contents( + try: + del test_config['target_deploy_status']['vault'] + except KeyError: + # Already removed + pass + zaza.openstack.utilities.openstack.block_until_ca_exists( 'keystone', - zaza.openstack.utilities.openstack.KEYSTONE_REMOTE_CACERT, cacert.decode().strip()) zaza.model.wait_for_application_states( states=test_config.get('target_deploy_status', {})) ip = zaza.model.get_app_ips( 'keystone')[0] + with tempfile.NamedTemporaryFile(mode='w') as fp: fp.write(cacert.decode()) fp.flush() - requests.get('https://{}:5000'.format(ip), verify=fp.name) + # Avoid race condition and retry + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(3), + wait=tenacity.wait_exponential( + multiplier=2, min=2, max=10)): + with attempt: + logging.info( + "Attempting to connect to https://{}:5000".format(ip)) + requests.get('https://{}:5000'.format(ip), verify=fp.name) def test_all_clients_authenticated(self): """Check all vault clients are authenticated.""" @@ -203,6 +263,97 @@ class VaultTest(BaseVaultTest): 'local-charm-policy', client.hvac_client.list_policies()) + def test_zzz_pause_resume(self): + """Run pause and resume tests. + + Pause service and check services are stopped, then resume and check + they are started. + """ + vault_actions = zaza.model.get_actions( + 'vault') + if 'pause' not in vault_actions or 'resume' not in vault_actions: + raise unittest.SkipTest("The version of charm-vault tested does " + "not have pause/resume actions") + # this pauses and resumes the LEAD unit + with self.pause_resume(['vault']): + logging.info("Testing pause resume") + lead_client = vault_utils.extract_lead_unit_client(self.clients) + self.assertTrue(lead_client.hvac_client.seal_status['sealed']) + + def test_vault_reload(self): + """Run reload tests. + + Reload service and check services were restarted + by doing simple change in the running config by API. + Then confirm that service is not sealed + """ + vault_actions = zaza.model.get_actions( + 'vault') + if 'reload' not in vault_actions: + raise unittest.SkipTest("The version of charm-vault tested does " + "not have reload action") + + container_results = zaza.model.run_on_leader( + "vault", "systemd-detect-virt --container" + ) + container_rc = json.loads(container_results["Code"]) + if container_rc == 0: + raise unittest.SkipTest( + "Vault unit is running in a container. Cannot use mlock." + ) + + lead_client = vault_utils.get_cluster_leader(self.clients) + running_config = vault_utils.get_running_config(lead_client) + value_to_set = not running_config['data']['disable_mlock'] + + logging.info("Setting disable-mlock to {}".format(str(value_to_set))) + zaza.model.set_application_config( + 'vault', + {'disable-mlock': str(value_to_set)}) + + logging.info("Waiting for model to be idle ...") + zaza.model.block_until_all_units_idle(model_name=self.model_name) + + logging.info("Testing action reload on {}".format(lead_client)) + zaza.model.run_action( + juju_utils.get_unit_name_from_ip_address( + lead_client.addr, 'vault'), + 'reload', + model_name=self.model_name) + + logging.info("Getting new value ...") + new_value = vault_utils.get_running_config(lead_client)[ + 'data']['disable_mlock'] + + logging.info( + "Asserting new value {} is equal to set value {}" + .format(new_value, value_to_set)) + self.assertEqual( + value_to_set, + new_value) + + logging.info("Asserting not sealed") + self.assertFalse(lead_client.hvac_client.seal_status['sealed']) + + def test_vault_restart(self): + """Run pause and resume tests. + + Restart service and check services are started. + """ + vault_actions = zaza.model.get_actions( + 'vault') + if 'restart' not in vault_actions: + raise unittest.SkipTest("The version of charm-vault tested does " + "not have restart action") + logging.info("Testing restart") + zaza.model.run_action_on_leader( + 'vault', + 'restart', + action_params={}) + + lead_client = vault_utils.extract_lead_unit_client(self.clients) + self.assertTrue(lead_client.hvac_client.seal_status['sealed']) + if __name__ == '__main__': unittest.main() diff --git a/zaza/openstack/charm_tests/vault/utils.py b/zaza/openstack/charm_tests/vault/utils.py index 26bbb2f..b4b5579 100644 --- a/zaza/openstack/charm_tests/vault/utils.py +++ b/zaza/openstack/charm_tests/vault/utils.py @@ -27,12 +27,69 @@ import tenacity import collections import zaza.model +import zaza.utilities.networking as network_utils AUTH_FILE = "vault_tests.yaml" CharmVaultClient = collections.namedtuple( 'CharmVaultClient', ['addr', 'hvac_client', 'vip_client']) +class VaultFacade: + """Provide a facade for interacting with vault. + + For example to setup new vault deployment:: + + vault_svc = VaultFacade() + vault_svc.unseal() + vault_svc.authorize() + """ + + def __init__(self, cacert=None, initialize=True): + """Create a facade for interacting with vault. + + :param cacert: Path to CA cert used for vaults api cert. + :type cacert: str + :param initialize: Whether to initialize vault. + :type initialize: bool + """ + self.clients = get_clients(cacert=cacert) + self.vip_client = get_vip_client(cacert=cacert) + if self.vip_client: + self.unseal_client = self.vip_client + else: + self.unseal_client = self.clients[0] + self.initialized = is_initialized(self.unseal_client) + if initialize: + self.initialize() + + @property + def is_initialized(self): + """Check if vault is initialized.""" + return self.initialized + + def initialize(self): + """Initialise vault and store resulting credentials.""" + if self.is_initialized: + self.vault_creds = get_credentails() + else: + self.vault_creds = init_vault(self.unseal_client) + store_credentails(self.vault_creds) + self.initialized = is_initialized(self.unseal_client) + + def unseal(self): + """Unseal all the vaults clients.""" + unseal_all(self.clients, self.vault_creds['keys'][0]) + + def authorize(self): + """Authorize charm to perfom certain actions. + + Run vault charm action to authorize the charm to perform a limited + set of calls against the vault API. + """ + auth_all(self.clients, self.vault_creds['root_token']) + run_charm_authorize(self.vault_creds['root_token']) + + def get_unit_api_url(ip): """Return URL for api access. @@ -45,7 +102,7 @@ def get_unit_api_url(ip): transport = 'http' if vault_config['ssl-cert']['value']: transport = 'https' - return '{}://{}:8200'.format(transport, ip) + return '{}://{}:8200'.format(transport, network_utils.format_addr(ip)) def get_hvac_client(vault_url, cacert=None): @@ -80,6 +137,41 @@ def get_vip_client(cacert=None): return client +def get_cluster_leader(clients): + """Get Vault cluster leader. + + We have to make sure we run api calls against the actual leader. + + :param clients: Clients list to get leader + :type clients: List of CharmVaultClient + :returns: CharmVaultClient + :rtype: CharmVaultClient or None + """ + if len(clients) == 1: + return clients[0] + + for client in clients: + if client.hvac_client.ha_status['is_self']: + return client + return None + + +def get_running_config(client): + """Get Vault running config. + + The hvac library does not support getting info from endpoint + /v1/sys/config/state/sanitized Therefore we implement it here + + :param client: Client used to get config + :type client: CharmVaultClient + :returns: dict from Vault api response + :rtype: dict + """ + return requests.get( + client.hvac_client.adapter.base_uri + '/v1/sys/config/state/sanitized', + headers={'X-Vault-Token': client.hvac_client.token}).json() + + def init_vault(client, shares=1, threshold=1): """Initialise vault. @@ -117,6 +209,37 @@ def get_clients(units=None, cacert=None): return clients +def extract_lead_unit_client( + clients=None, application_name='vault', cacert=None): + """Find the lead unit client. + + This returns the lead unit client from a list of clients. If no clients + are passed, then the clients are resolved using the cacert (if needed) and + the application_name. The client is then matched to the lead unit. If + clients are passed, but no leader is found in them, then the function + raises a RuntimeError. + + :param clients: List of CharmVaultClient + :type clients: List[CharmVaultClient] + :param application_name: The application name + :type application_name: str + :param cacert: Path to CA cert used for vaults api cert. + :type cacert: str + :returns: The leader client + :rtype: CharmVaultClient + :raises: RuntimeError if the lead unit cannot be found + """ + if clients is None: + units = zaza.model.get_app_ips('vault') + clients = get_clients(units, cacert) + lead_ip = zaza.model.get_lead_unit_ip(application_name) + for client in clients: + if client.addr == lead_ip: + return client + raise RuntimeError("Leader client not found for application: {}" + .format(application_name)) + + @tenacity.retry( retry=tenacity.retry_if_exception_type(( ConnectionRefusedError, @@ -155,6 +278,22 @@ def ensure_secret_backend(client): pass +def find_unit_with_creds(): + """Find the unit thats has stored the credentials. + + :returns: unit name + :rtype: str + """ + unit = None + for vault_unit in zaza.model.get_units('vault'): + cmd = 'ls -l ~ubuntu/{}'.format(AUTH_FILE) + resp = zaza.model.run_on_unit(vault_unit.name, cmd) + if resp.get('Code') == '0': + unit = vault_unit.name + break + return unit + + def get_credentails(): """Retrieve vault token and keys from unit. @@ -164,7 +303,7 @@ def get_credentails(): :returns: Tokens and keys for accessing test environment :rtype: dict """ - unit = zaza.model.get_first_unit_name('vault') + unit = find_unit_with_creds() with tempfile.TemporaryDirectory() as tmpdirname: tmp_file = '{}/{}'.format(tmpdirname, AUTH_FILE) zaza.model.scp_from_unit( diff --git a/zaza/openstack/configure/bgp_speaker.py b/zaza/openstack/configure/bgp_speaker.py index 2f5dbb9..4fd9587 100755 --- a/zaza/openstack/configure/bgp_speaker.py +++ b/zaza/openstack/configure/bgp_speaker.py @@ -19,7 +19,6 @@ import argparse import logging import sys -import neutronclient from zaza.openstack.utilities import ( cli as cli_utils, openstack as openstack_utils, @@ -99,40 +98,6 @@ def setup_bgp_speaker(peer_application_name, keystone_session=None): "Advertised floating IP: {}".format( floating_ip["floating_ip_address"])) - # NOTE(fnordahl): As a workaround for LP: #1784083 remove BGP speaker from - # dragent and add it back. - logging.info( - "Waiting for Neutron agent 'neutron-bgp-dragent' to appear...") - keystone_session = openstack_utils.get_overcloud_keystone_session() - neutron_client = openstack_utils.get_neutron_session_client( - keystone_session) - agents = openstack_utils.neutron_agent_appears(neutron_client, - 'neutron-bgp-dragent') - agent_id = None - for agent in agents.get('agents', []): - agent_id = agent.get('id', None) - if agent_id is not None: - break - logging.info( - 'Waiting for BGP speaker to appear on agent "{}"...'.format(agent_id)) - bgp_speakers = openstack_utils.neutron_bgp_speaker_appears_on_agent( - neutron_client, agent_id) - logging.info( - "Removing and adding back bgp-speakers to agent (LP: #1784083)...") - while True: - try: - for bgp_speaker in bgp_speakers.get('bgp_speakers', []): - bgp_speaker_id = bgp_speaker.get('id', None) - logging.info('removing "{}" from "{}"' - ''.format(bgp_speaker_id, agent_id)) - neutron_client.remove_bgp_speaker_from_dragent( - agent_id, bgp_speaker_id) - except neutronclient.common.exceptions.NotFound as e: - logging.info('Exception: "{}"'.format(e)) - break - neutron_client.add_bgp_speaker_to_dragent( - agent_id, {'bgp_speaker_id': bgp_speaker_id}) - def run_from_cli(): """Run BGP Speaker setup from CLI. diff --git a/zaza/openstack/configure/guest.py b/zaza/openstack/configure/guest.py index 9418cb3..c2e20de 100644 --- a/zaza/openstack/configure/guest.py +++ b/zaza/openstack/configure/guest.py @@ -16,11 +16,20 @@ """Encapsulate nova testing.""" +import subprocess import logging import time import zaza.openstack.utilities.openstack as openstack_utils import zaza.openstack.charm_tests.nova.utils as nova_utils +import zaza.openstack.utilities.exceptions as openstack_exceptions + +from tenacity import ( + RetryError, + Retrying, + stop_after_attempt, + wait_exponential, +) boot_tests = { 'cirros': { @@ -62,6 +71,8 @@ def launch_instance(instance_key, use_boot_volume=False, vm_name=None, :type meta: dict :param userdata: Configuration to use upon launch, used by cloud-init. :type userdata: str + :returns: the created instance + :rtype: novaclient.Server """ keystone_session = openstack_utils.get_overcloud_keystone_session() nova_client = openstack_utils.get_nova_session_client(keystone_session) @@ -130,7 +141,21 @@ def launch_instance(instance_key, use_boot_volume=False, vm_name=None, external_network_name, port=port)['floating_ip_address'] logging.info('Assigned floating IP {} to {}'.format(ip, vm_name)) - openstack_utils.ping_response(ip) + try: + for attempt in Retrying( + stop=stop_after_attempt(8), + wait=wait_exponential(multiplier=1, min=2, max=60)): + with attempt: + try: + openstack_utils.ping_response(ip) + except subprocess.CalledProcessError as e: + logging.error('Pinging {} failed with {}' + .format(ip, e.returncode)) + logging.error('stdout: {}'.format(e.stdout)) + logging.error('stderr: {}'.format(e.stderr)) + raise + except RetryError: + raise openstack_exceptions.NovaGuestNoPingResponse() # Check ssh'ing to instance. logging.info('Testing ssh access.') @@ -140,3 +165,4 @@ def launch_instance(instance_key, use_boot_volume=False, vm_name=None, vm_name=vm_name, password=boot_tests[instance_key].get('password'), privkey=openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME)) + return instance diff --git a/zaza/openstack/configure/hacluster.py b/zaza/openstack/configure/hacluster.py index 9e687b8..eec2a73 100644 --- a/zaza/openstack/configure/hacluster.py +++ b/zaza/openstack/configure/hacluster.py @@ -66,6 +66,23 @@ def get_nodes_status(service_name, model_name=None): return status +def remove_node(service_name, node_name, model_name=None): + """Remove given node from pacemaker. + + :param service_name: Name of Juju application to run query against. + :type service_name: str + :param node_name: Name of node to delete. + :type node_name: str + :param model_name: Name of model unit_name resides in. + :type model_name: str + """ + remove_cmd = 'crm_node --force -R {}'.format(node_name) + zaza.model.run_on_leader( + service_name, + remove_cmd, + model_name=model_name) + + def check_all_nodes_online(service_name, model_name=None): """Return whether all the crm nodes are online. diff --git a/zaza/openstack/configure/masakari.py b/zaza/openstack/configure/masakari.py index 9d48e03..707416f 100644 --- a/zaza/openstack/configure/masakari.py +++ b/zaza/openstack/configure/masakari.py @@ -19,6 +19,8 @@ and recovery. """ import logging +import openstack.exceptions as ostack_except +import tenacity import zaza.model import zaza.openstack.utilities.openstack as openstack_utils @@ -39,7 +41,7 @@ def roundrobin_assign_hosts_to_segments(nova_client, masakari_client): segment_ids = segment_ids * len(hypervisors) for hypervisor in hypervisors: target_segment = segment_ids.pop() - hostname = hypervisor.hypervisor_hostname.split('.')[0] + hostname = hypervisor.hypervisor_hostname logging.info('Adding {} to segment {}'.format(hostname, target_segment)) masakari_client.create_host( @@ -81,6 +83,28 @@ def create_segments(segment_number=1, host_assignment_method=None): masakari_client) +@tenacity.retry( + wait=tenacity.wait_exponential(multiplier=2, max=60), + reraise=True, stop=tenacity.stop_after_attempt(10), + retry=tenacity.retry_if_exception_type(ostack_except.ConflictException)) +def enable_host(masakari_client, host, segment): + """Enable hypervisor within masakari. + + :param masakari_client: Authenticated masakari client + :type masakari_client: openstack.instance_ha.v1._proxy.Proxy + :param host: Uuid of host to enable + :type host: str + :param segment: Uuid of segment host is associated with. + :type segment: str + """ + logging.info("Removing maintenance mode from masakari " + "host {}".format(host)) + masakari_client.update_host( + host, + segment_id=segment, + **{'on_maintenance': False}) + + def enable_hosts(masakari_client=None): """Enable all hypervisors within masakari. @@ -98,12 +122,7 @@ def enable_hosts(masakari_client=None): for segment in masakari_client.segments(): for host in masakari_client.hosts(segment_id=segment.uuid): if host.on_maintenance: - logging.info("Removing maintenance mode from masakari " - "host {}".format(host.uuid)) - masakari_client.update_host( - host.uuid, - segment_id=segment.uuid, - **{'on_maintenance': False}) + enable_host(masakari_client, host.uuid, segment.uuid) def _svc_control(unit_name, action, services, model_name): @@ -148,7 +167,7 @@ def _svc_set_systemd_restart_mode(unit_name, service_name, mode, model_name): mode)) cmds = [ ("sed -i -e 's/^Restart=.*/Restart={}/g' " - "/lib/systemd/system/{}.service'").format(mode, service_name), + "/lib/systemd/system/{}.service").format(mode, service_name), 'systemctl daemon-reload'] logging.info('Running {} on {}'.format(cmds, unit_name)) zaza.model.run_on_unit( @@ -179,10 +198,18 @@ def simulate_compute_host_failure(unit_name, model_name): 'stop', ['corosync', 'nova-compute'], model_name) - logging.info('Sending pacemaker_remoted a SIGTERM') + compute_app = unit_name.split('/')[0] + release_pair = openstack_utils.get_current_os_release_pair( + application=compute_app) + if (openstack_utils.get_os_release(release_pair=release_pair) >= + openstack_utils.get_os_release('focal_ussuri')): + pacemaker_proc = '/usr/sbin/pacemaker-remoted' + else: + pacemaker_proc = '/usr/sbin/pacemaker_remoted' + logging.info('Sending {} a SIGTERM'.format(pacemaker_proc)) zaza.model.run_on_unit( unit_name, - 'pkill -9 -f /usr/sbin/pacemaker_remoted', + 'pkill -9 -f {}'.format(pacemaker_proc), model_name=model_name) diff --git a/zaza/openstack/configure/network.py b/zaza/openstack/configure/network.py index 68965d9..ad1a07b 100755 --- a/zaza/openstack/configure/network.py +++ b/zaza/openstack/configure/network.py @@ -129,7 +129,6 @@ def setup_sdn(network_config, keystone_session=None): ext_network = openstack_utils.create_external_network( neutron_client, project_id, - network_config.get("dvr_enabled", False), network_config["external_net_name"]) openstack_utils.create_external_subnet( neutron_client, @@ -184,7 +183,9 @@ def setup_sdn(network_config, keystone_session=None): openstack_utils.add_neutron_secgroup_rules(neutron_client, project_id) -def setup_gateway_ext_port(network_config, keystone_session=None): +def setup_gateway_ext_port(network_config, keystone_session=None, + limit_gws=None, + use_juju_wait=True): """Perform setup external port on Neutron Gateway. For OpenStack on OpenStack scenarios. @@ -193,6 +194,10 @@ def setup_gateway_ext_port(network_config, keystone_session=None): :type network_config: dict :param keystone_session: Keystone session object for undercloud :type keystone_session: keystoneauth1.session.Session object + :param limit_gws: Limit the number of gateways that get a port attached + :type limit_gws: Optional[int] + :param use_juju_wait: Use juju wait (default True) for model to settle + :type use_juju_wait: boolean :returns: None :rtype: None """ @@ -212,12 +217,24 @@ def setup_gateway_ext_port(network_config, keystone_session=None): else: net_id = None + # If we're using netplan, we need to add the new interface to the guest + current_release = openstack_utils.get_os_release() + bionic_queens = openstack_utils.get_os_release('bionic_queens') + if current_release >= bionic_queens: + logging.warn("Adding second interface for dataport to guest netplan " + "for bionic-queens and later") + add_dataport_to_netplan = True + else: + add_dataport_to_netplan = False + logging.info("Configuring network for OpenStack undercloud/provider") openstack_utils.configure_gateway_ext_port( nova_client, neutron_client, - dvr_mode=network_config.get("dvr_enabled", False), - net_id=net_id) + net_id=net_id, + add_dataport_to_netplan=add_dataport_to_netplan, + limit_gws=limit_gws, + use_juju_wait=use_juju_wait) def run_from_cli(**kwargs): @@ -256,6 +273,11 @@ def run_from_cli(**kwargs): default="network.yaml") parser.add_argument("--cacert", help="Path to CA certificate bundle file", default=None) + parser.add_argument("--no-use-juju-wait", + help=("don't use juju wait for the model to settle " + "(default true)"), + action="store_false", + default=True) # Handle CLI options options = parser.parse_args() net_topology = (kwargs.get('net_toplogoy') or @@ -271,12 +293,14 @@ def run_from_cli(**kwargs): network_config = generic_utils.get_network_config( net_topology, ignore_env_vars, net_topology_file) - # Handle network for Openstack-on-Openstack scenarios + # Handle network for OpenStack-on-OpenStack scenarios if juju_utils.get_provider_type() == "openstack": undercloud_ks_sess = openstack_utils.get_undercloud_keystone_session( verify=cacert) setup_gateway_ext_port(network_config, - keystone_session=undercloud_ks_sess) + keystone_session=undercloud_ks_sess, + use_juju_wait=cli_utils.parse_arg( + options, 'no_use_juju_wait')) overcloud_ks_sess = openstack_utils.get_overcloud_keystone_session( verify=cacert) diff --git a/zaza/openstack/configure/pre_deploy_certs.py b/zaza/openstack/configure/pre_deploy_certs.py new file mode 100644 index 0000000..34af066 --- /dev/null +++ b/zaza/openstack/configure/pre_deploy_certs.py @@ -0,0 +1,76 @@ +"""Module to setup pre-deploy TLS certs.""" + +import ipaddress +import itertools +import base64 +import os + +import zaza.openstack.utilities.cert + +ISSUER_NAME = 'OSCI' + + +def set_cidr_certs(): + """Create certs and keys for deploy using IP SANS from CIDR. + + Create a certificate authority certificate and key. The CA cert and key + are then base 64 encoded and assigned to the TEST_CAKEY and + TEST_CACERT environment variables. + + Using the CA key a second certificate and key are generated. The new + certificate has a SAN entry for the first 2^11 IPs in the CIDR. + The cert and key are then base 64 encoded and assigned to the TEST_KEY + and TEST_CERT environment variables. + """ + (cakey, cacert) = zaza.openstack.utilities.cert.generate_cert( + ISSUER_NAME, + generate_ca=True) + os.environ['TEST_CAKEY'] = base64.b64encode(cakey).decode() + os.environ['TEST_CACERT'] = base64.b64encode(cacert).decode() + # We need to restrain the number of SubjectAlternativeNames we attempt to + # put # in the certificate. There is a hard limit for what length the sum + # of all extensions in the certificate can have. + # + # - 2^11 ought to be enough for anybody + alt_names = [] + for addr in itertools.islice( + ipaddress.IPv4Network(os.environ.get('TEST_CIDR_EXT')), 2**11): + alt_names.append(str(addr)) + (key, cert) = zaza.openstack.utilities.cert.generate_cert( + '*.serverstack', + alternative_names=alt_names, + issuer_name=ISSUER_NAME, + signing_key=cakey) + os.environ['TEST_KEY'] = base64.b64encode(key).decode() + os.environ['TEST_CERT'] = base64.b64encode(cert).decode() + + +def set_certs_per_vips(): + """Create certs and keys for deploy using VIPS. + + Create a certificate authority certificate and key. The CA cert and key + are then base 64 encoded and assigned to the TEST_CAKEY and + TEST_CACERT environment variables. + + Using the CA key a certificate and key is generated for each VIP specified + via environment variables. eg if TEST_VIP06=172.20.0.107 is set in the + environment then a cert with a SAN entry for 172.20.0.107 is generated. + The cert and key are then base 64 encoded and assigned to the + TEST_VIP06_KEY and TEST_VIP06_CERT environment variables. + """ + (cakey, cacert) = zaza.openstack.utilities.cert.generate_cert( + ISSUER_NAME, + generate_ca=True) + os.environ['TEST_CAKEY'] = base64.b64encode(cakey).decode() + os.environ['TEST_CACERT'] = base64.b64encode(cacert).decode() + for vip_name, vip_ip in os.environ.items(): + if vip_name.startswith('TEST_VIP'): + (key, cert) = zaza.openstack.utilities.cert.generate_cert( + '*.serverstack', + alternative_names=[vip_ip], + issuer_name=ISSUER_NAME, + signing_key=cakey) + os.environ[ + '{}_KEY'.format(vip_name)] = base64.b64encode(key).decode() + os.environ[ + '{}_CERT'.format(vip_name)] = base64.b64encode(cert).decode() diff --git a/zaza/openstack/configure/telemetry.py b/zaza/openstack/configure/telemetry.py new file mode 100644 index 0000000..8bc01b8 --- /dev/null +++ b/zaza/openstack/configure/telemetry.py @@ -0,0 +1,145 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Configure and manage masakari. + +Functions for managing masakari resources and simulating compute node loss +and recovery. +""" + +import logging +import tenacity +import time + +import zaza.model + + +def ceilometer_upgrade(application_name=None, model_name=None): + """Run ceilometer upgrade action. + + :param application_name: Name of application to run action against. + :type application_name: str + :param model_name: Name of model application_name resides in. + :type model_name: str + """ + zaza.model.run_action_on_leader( + application_name, + 'ceilometer-upgrade', + model_name=model_name, + action_params={}) + + +def get_alarm(aodh_client, alarm_name): + """Return the alarm with the given name. + + :param aodh_client: Authenticated aodh v2 client + :type aodh_client: aodhclient.v2.client.Client + :param alarm_name: Name of alarm to search for + :type alarm_name: str + :returns: Returns a dict of alarm data. + :rtype: {} or None + """ + for alarm in aodh_client.alarm.list(): + if alarm['name'] == alarm_name: + return alarm + return None + + +def alarm_cache_wait(): + """Wait for alarm cache to clear.""" + # AODH has an alarm cache (see event_alarm_cache_ttl in aodh.conf). This + # means deleted alarms can persist and fire. The default is 60s and is + # currently not configrable via the charm so 61s is a safe assumption. + time.sleep(61) + + +def delete_alarm(aodh_client, alarm_name, cache_wait=False): + """Delete alarm with given name. + + :param aodh_client: Authenticated aodh v2 client + :type aodh_client: aodhclient.v2.client.Client + :param alarm_name: Name of alarm to delete + :type alarm_name: str + :param cache_wait: Whether to wait for cache to clear after deletion. + :type cache_wait: bool + """ + alarm = get_alarm(aodh_client, alarm_name) + if alarm: + aodh_client.alarm.delete(alarm['alarm_id']) + if cache_wait: + alarm_cache_wait() + + +def get_alarm_state(aodh_client, alarm_id): + """Return the state of the alarm with the given name. + + :param aodh_client: Authenticated aodh v2 client + :type aodh_client: aodhclient.v2.client.Client + :param alarm_id: ID of provided alarm + :param alarm_id: str + :returns: State of given alarm + :rtype: str + """ + alarm = aodh_client.alarm.get(alarm_id) + return alarm['state'] + + +def create_server_power_off_alarm(aodh_client, alarm_name, server_uuid): + """Create an alarm which triggers when an instance powers off. + + :param aodh_client: Authenticated aodh v2 client + :type aodh_client: aodhclient.v2.client.Client + :param alarm_name: Name of alarm to delete + :type alarm_name: str + :param server_uuid: UUID of server to monitor + :type server_uuid: str + :returns: Dict of alarm data + :rtype: {} + """ + alarm_def = { + 'type': 'event', + 'name': alarm_name, + 'description': 'Instance powered OFF', + 'alarm_actions': ['log://'], + 'ok_actions': ['log://'], + 'insufficient_data_actions': ['log://'], + 'event_rule': { + 'event_type': 'compute.instance.power_off.*', + 'query': [{'field': 'traits.instance_id', + 'op': 'eq', + 'type': 'string', + 'value': server_uuid}]}} + return aodh_client.alarm.create(alarm_def) + + +def block_until_alarm_state(aodh_client, alarm_id, target_state='alarm'): + """Block until alarm has reached target state. + + :param aodh_client: Authenticated aodh v2 client + :type aodh_client: aodhclient.v2.client.Client + :param alarm_id: ID of provided alarm + :type alarm_id: str + :param target_state: uuid of alarm to check + :stype target_state: str + """ + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(3), + wait=tenacity.wait_exponential(multiplier=1, min=2, max=10)): + with attempt: + alarm_state = get_alarm_state( + aodh_client, + alarm_id) + + logging.info('Alarm in state {}'.format(alarm_state)) + assert alarm_state == target_state diff --git a/zaza/openstack/utilities/__init__.py b/zaza/openstack/utilities/__init__.py index 35b5a14..5798f26 100644 --- a/zaza/openstack/utilities/__init__.py +++ b/zaza/openstack/utilities/__init__.py @@ -13,3 +13,155 @@ # limitations under the License. """Collection of utilities to support zaza tests etc.""" + + +import time + +from keystoneauth1.exceptions.connection import ConnectFailure + + +class ObjectRetrierWraps(object): + """An automatic retrier for an object. + + This is designed to be used with an instance of an object. Basically, it + wraps the object and any attributes that are fetched. Essentially, it is + used to provide retries on method calls on openstack client objects in + tests to increase robustness of tests. + + Although, technically this is bad, retries can be logged with the optional + log method. + + Usage: + + # get a client that does 3 retries, waits 5 seconds between retries and + # retries on any error. + some_client = ObjectRetrierWraps(get_some_client) + # this gets retried up to 3 times. + things = some_client.list_things() + + Note, it is quite simple. It wraps the object and on a getattr(obj, name) + it finds the name and then returns a wrapped version of that name. On a + call, it returns the value of that call. It only wraps objects in the + chain that are either callable or have a __getattr__() method. i.e. one + that can then be retried or further fetched. This means that if a.b.c() is + a chain of objects, and we just wrap 'a', then 'b' and 'c' will both be + wrapped that the 'c' object __call__() method will be the one that is + actually retried. + + Note: this means that properties that do method calls won't be retried. + This is a limitation that may be addressed in the future, if it is needed. + """ + + def __init__(self, obj, num_retries=3, initial_interval=5.0, backoff=1.0, + max_interval=15.0, total_wait=30.0, retry_exceptions=None, + log=None): + """Initialise the retrier object. + + :param obj: The object to wrap. Ought to be an instance of something + that you want to get methods on to call or be called itself. + :type obj: Any + :param num_retries: The (maximum) number of retries. May not be hit if + the total_wait time is exceeded. + :type num_retries: int + :param initial_interval: The initial or starting interval between + retries. + :type initial_interval: float + :param backoff: The exponential backoff multiple. 1 is linear. + :type backoff: float + :param max_interval: The maximum interval between retries. + If backoff is >1 then the initial_interval will never grow larger + than max_interval. + :type max_interval: float + :param retry_exceptions: The list of exceptions to retry on, or None. + If a list, then it will only retry if the exception is one of the + ones in the list. + :type retry_exceptions: List[Exception] + """ + # Note we use semi-private variable names that shouldn't clash with any + # on the actual object. + self.__obj = obj + self.__kwargs = { + 'num_retries': num_retries, + 'initial_interval': initial_interval, + 'backoff': backoff, + 'max_interval': max_interval, + 'total_wait': total_wait, + 'retry_exceptions': retry_exceptions, + 'log': log or (lambda x: None), + } + + def __getattr__(self, name): + """Get attribute; delegates to wrapped object.""" + # Note the above may generate an attribute error; we expect this and + # will fail with an attribute error. + attr = getattr(self.__obj, name) + if callable(attr) or hasattr(attr, "__getattr__"): + return ObjectRetrierWraps(attr, **self.__kwargs) + else: + return attr + # TODO(ajkavanagh): Note detecting a property is a bit trickier. we + # can do isinstance(attr, property), but then the act of accessing it + # is what calls it. i.e. it would fail at the getattr(self.__obj, + # name) stage. The solution is to check first, and if it's a property, + # then treat it like the retrier. However, I think this is too + # complex for the first go, and to use manual retries in that instance. + + def __call__(self, *args, **kwargs): + """Call the object; delegates to the wrapped object.""" + obj = self.__obj + retry = 0 + wait = self.__kwargs['initial_interval'] + max_interval = self.__kwargs['max_interval'] + log = self.__kwargs['log'] + backoff = self.__kwargs['backoff'] + total_wait = self.__kwargs['total_wait'] + num_retries = self.__kwargs['num_retries'] + retry_exceptions = self.__kwargs['retry_exceptions'] + wait_so_far = 0 + while True: + try: + return obj(*args, **kwargs) + except Exception as e: + # if retry_exceptions is not None, or the type of the exception + # is not in the list of retries, then raise an exception + # immediately. This means that if retry_exceptions is None, + # then the method is always retried. + if (retry_exceptions is not None and + type(e) not in retry_exceptions): + raise + retry += 1 + if retry > num_retries: + log("{}: exceeded number of retries, so erroring out" + .format(str(obj))) + raise e + log("{}: call failed: retrying in {} seconds" + .format(str(obj), wait)) + time.sleep(wait) + wait_so_far += wait + if wait_so_far >= total_wait: + raise e + wait = wait * backoff + if wait > max_interval: + wait = max_interval + + +def retry_on_connect_failure(client, **kwargs): + """Retry an object that eventually gets resolved to a call. + + Specifically, this uses ObjectRetrierWraps but only against the + keystoneauth1.exceptions.connection.ConnectFailure exeception. + + :params client: the object that may throw and exception when called. + :type client: Any + :params **kwargs: the arguments supplied to the ObjectRetrierWraps init + method + :type **kwargs: Dict[Any] + :returns: client wrapped in an ObjectRetrierWraps instance + :rtype: ObjectRetrierWraps[client] + """ + kwcopy = kwargs.copy() + if 'retry_exceptions' not in kwcopy: + kwcopy['retry_exceptions'] = [] + if ConnectFailure not in kwcopy['retry_exceptions']: + kwcopy['retry_exceptions'].append(ConnectFailure) + return ObjectRetrierWraps(client, **kwcopy) diff --git a/zaza/openstack/utilities/ceph.py b/zaza/openstack/utilities/ceph.py index 2c54512..a01f56e 100644 --- a/zaza/openstack/utilities/ceph.py +++ b/zaza/openstack/utilities/ceph.py @@ -1,9 +1,16 @@ """Module containing Ceph related utilities.""" - +import json import logging -import zaza.openstack.utilities.openstack as openstack_utils import zaza.model as zaza_model +import zaza.utilities.juju as juju_utils + +import zaza.openstack.utilities.openstack as openstack_utils + +REPLICATED_POOL_TYPE = 'replicated' +ERASURE_POOL_TYPE = 'erasure-coded' +REPLICATED_POOL_CODE = 1 +ERASURE_POOL_CODE = 3 def get_expected_pools(radosgw=False): @@ -97,6 +104,87 @@ def get_ceph_pools(unit_name, model_name=None): return pools +def get_ceph_pool_details(query_leader=True, unit_name=None, model_name=None): + """Get ceph pool details. + + Return a list of ceph pools details dicts. + + :param query_leader: Whether to query the leader for pool details. + :type query_leader: bool + :param unit_name: Name of unit to get the pools on if query_leader is False + :type unit_name: string + :param model_name: Name of model to operate in + :type model_name: str + :returns: Dict of ceph pools + :rtype: List[Dict,] + :raise: zaza_model.CommandRunFailed + """ + cmd = 'sudo ceph osd pool ls detail -f json' + if query_leader and unit_name: + raise ValueError("Cannot set query_leader and unit_name") + if query_leader: + result = zaza_model.run_on_leader( + 'ceph-mon', + cmd, + model_name=model_name) + else: + result = zaza_model.run_on_unit( + unit_name, + cmd, + model_name=model_name) + if int(result.get('Code')) != 0: + raise zaza_model.CommandRunFailed(cmd, result) + return json.loads(result.get('Stdout')) + + +def get_ceph_df(unit_name, model_name=None): + """Return dict of ceph df json output, including ceph pool state. + + :param unit_name: Name of the unit to get ceph df + :type unit_name: string + :param model_name: Name of model to operate in + :type model_name: str + :returns: Dict of ceph df output + :rtype: dict + :raise: zaza.model.CommandRunFailed + """ + cmd = 'sudo ceph df --format=json' + result = zaza_model.run_on_unit(unit_name, cmd, model_name=model_name) + if result.get('Code') != '0': + raise zaza_model.CommandRunFailed(cmd, result) + return json.loads(result.get('Stdout')) + + +def get_ceph_pool_sample(unit_name, pool_id=0, model_name=None): + """Return list of ceph pool attributes. + + Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param unit_name: Name of the unit to get the pool sample + :type unit_name: string + :param pool_id: Ceph pool ID + :type pool_id: int + :param model_name: Name of model to operate in + :type model_name: str + :returns: List of pool name, object count, kb disk space used + :rtype: list + :raises: zaza.model.CommandRunFailed + """ + df = get_ceph_df(unit_name, model_name) + for pool in df['pools']: + if pool['id'] == pool_id: + pool_name = pool['name'] + obj_count = pool['stats']['objects'] + kb_used = pool['stats']['kb_used'] + + logging.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def get_rbd_hash(unit_name, pool, image, model_name=None): """Get SHA512 hash of RBD image. @@ -118,3 +206,37 @@ def get_rbd_hash(unit_name, pool, image, model_name=None): if result.get('Code') != '0': raise zaza_model.CommandRunFailed(cmd, result) return result.get('Stdout').rstrip() + + +def get_pools_from_broker_req(application_or_unit, model_name=None): + """Get pools requested by application or unit. + + By retrieving and parsing broker request from relation data we can get a + list of pools a unit has requested. + + :param application_or_unit: Name of application or unit that is at the + other end of a ceph-mon relation. + :type application_or_unit: str + :param model_name: Name of Juju model to operate on + :type model_name: Optional[str] + :returns: List of pools requested. + :rtype: List[str] + :raises: KeyError + """ + # NOTE: we do not pass on a name for the remote_interface_name as that + # varies between the Ceph consuming applications. + relation_data = juju_utils.get_relation_from_unit( + 'ceph-mon', application_or_unit, None, model_name=model_name) + + # NOTE: we probably should consume the Ceph broker code from c-h but c-h is + # such a beast of a dependency so let's defer adding it to Zaza if we can. + broker_req = json.loads(relation_data['broker_req']) + + # A charm may request modifications to an existing pool by adding multiple + # 'create-pool' broker requests so we need to deduplicate the list before + # returning it. + return list(set([ + op['name'] + for op in broker_req['ops'] + if op['op'] == 'create-pool' + ])) diff --git a/zaza/openstack/utilities/cert.py b/zaza/openstack/utilities/cert.py index 9cb0c19..d138386 100644 --- a/zaza/openstack/utilities/cert.py +++ b/zaza/openstack/utilities/cert.py @@ -94,10 +94,10 @@ def generate_cert(common_name, cryptography.x509.oid.NameOID.COMMON_NAME, issuer_name), ])) builder = builder.not_valid_before( - datetime.datetime.today() - datetime.timedelta(0, 1, 0), + datetime.datetime.utcnow() - datetime.timedelta(0, 1, 0), ) builder = builder.not_valid_after( - datetime.datetime.today() + datetime.timedelta(30, 0, 0), + datetime.datetime.utcnow() + datetime.timedelta(30, 0, 0), ) builder = builder.serial_number(cryptography.x509.random_serial_number()) builder = builder.public_key(public_key) @@ -190,7 +190,7 @@ def sign_csr(csr, ca_private_key, ca_cert=None, issuer_name=None, datetime.datetime.today() - datetime.timedelta(1, 0, 0), ) builder = builder.not_valid_after( - datetime.datetime.today() + datetime.timedelta(80, 0, 0), + datetime.datetime.today() + datetime.timedelta(4000, 0, 0), ) builder = builder.subject_name(new_csr.subject) builder = builder.public_key(new_csr.public_key()) diff --git a/zaza/openstack/utilities/charm_upgrade.py b/zaza/openstack/utilities/charm_upgrade.py new file mode 100644 index 0000000..017b642 --- /dev/null +++ b/zaza/openstack/utilities/charm_upgrade.py @@ -0,0 +1,15 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of functions to support charm upgrade testing.""" diff --git a/zaza/openstack/utilities/cli.py b/zaza/openstack/utilities/cli.py index 68a659f..f8071be 100644 --- a/zaza/openstack/utilities/cli.py +++ b/zaza/openstack/utilities/cli.py @@ -16,6 +16,7 @@ import logging import os +import sys def parse_arg(options, arg, multiargs=False): @@ -51,6 +52,6 @@ def setup_logging(): rootLogger = logging.getLogger() rootLogger.setLevel('INFO') if not rootLogger.hasHandlers(): - consoleHandler = logging.StreamHandler() + consoleHandler = logging.StreamHandler(sys.stdout) consoleHandler.setFormatter(logFormatter) rootLogger.addHandler(consoleHandler) diff --git a/zaza/openstack/utilities/exceptions.py b/zaza/openstack/utilities/exceptions.py index 868a7c6..364ab49 100644 --- a/zaza/openstack/utilities/exceptions.py +++ b/zaza/openstack/utilities/exceptions.py @@ -162,6 +162,18 @@ class CephPoolNotFound(Exception): pass +class CephPoolNotConfigured(Exception): + """Ceph pool not configured properly.""" + + pass + + +class CephGenericError(Exception): + """A generic/other Ceph error occurred.""" + + pass + + class NovaGuestMigrationFailed(Exception): """Nova guest migration failed.""" @@ -172,3 +184,21 @@ class NovaGuestRestartFailed(Exception): """Nova guest restart failed.""" pass + + +class NovaGuestNoPingResponse(Exception): + """Nova guest failed to respond to pings.""" + + pass + + +class PolicydError(Exception): + """Policyd override failed.""" + + pass + + +class CACERTNotFound(Exception): + """Could not find cacert.""" + + pass diff --git a/zaza/openstack/utilities/generic.py b/zaza/openstack/utilities/generic.py index 9bb4bc9..fcd7989 100644 --- a/zaza/openstack/utilities/generic.py +++ b/zaza/openstack/utilities/generic.py @@ -14,22 +14,61 @@ """Collection of functions that did not fit anywhere else.""" +import asyncio import logging import os import socket import subprocess import telnetlib +import tempfile import yaml from zaza import model -from zaza.openstack.utilities import juju as juju_utils from zaza.openstack.utilities import exceptions as zaza_exceptions from zaza.openstack.utilities.os_versions import UBUNTU_OPENSTACK_RELEASE +from zaza.utilities import juju as juju_utils -SUBORDINATE_PAUSE_RESUME_BLACKLIST = [ - "cinder-ceph", -] +def assertActionRanOK(action): + """Assert that the remote action ran successfully. + + Example usage:: + + self.assertActionRanOK(model.run_action( + unit, + 'pause', + model_name=self.model_name)) + + self.assertActionRanOK(model.run_action_on_leader( + unit, + 'pause', + model_name=self.model_name)) + + :param action: Action object to check. + :type action: juju.action.Action + :raises: AssertionError if the assertion fails. + """ + if action.status != 'completed': + msg = ("Action '{name}' exited with status '{status}': " + "'{message}'").format(**action.data) + raise AssertionError(msg) + + +def assertRemoteRunOK(run_output): + """Use with zaza.model.run_on_unit. + + Example usage:: + + self.assertRemoteRunOK(zaza.model.run_on_unit( + unit, + 'ls /tmp/')) + + :param action: Dict returned from remote run. + :type action: dict + :raises: AssertionError if the assertion fails. + """ + if int(run_output['Code']) != 0: + raise AssertionError("Command failed: {}".format(run_output)) def dict_to_yaml(dict_data): @@ -75,21 +114,36 @@ def get_network_config(net_topology, ignore_env_vars=False, return net_info -def get_pkg_version(application, pkg): +def get_unit_hostnames(units, fqdn=False): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + cmd = 'hostname' + if fqdn: + cmd = cmd + ' -f' + output = model.run_on_unit(unit.entity_id, cmd) + hostname = output['Stdout'].strip() + host_names[unit.entity_id] = hostname + return host_names + + +def get_pkg_version(application, pkg, model_name=None): """Return package version. :param application: Application name :type application: string :param pkg: Package name :type pkg: string + :param model_name: Name of model to query. + :type model_name: str :returns: List of package version :rtype: list """ versions = [] - units = model.get_units(application) + units = model.get_units(application, model_name=model_name) for unit in units: cmd = 'dpkg -l | grep {}'.format(pkg) - out = juju_utils.remote_run(unit.entity_id, cmd) + out = juju_utils.remote_run(unit.entity_id, cmd, model_name=model_name) versions.append(out.split('\n')[0].split()[2]) if len(set(versions)) != 1: raise Exception('Unexpected output from pkg version check') @@ -123,27 +177,30 @@ def get_undercloud_env_vars(): export NET_ID="a705dd0f-5571-4818-8c30-4132cc494668" export GATEWAY="172.17.107.1" export CIDR_EXT="172.17.107.0/24" - export NAMESERVER="10.5.0.2" + export NAME_SERVER="10.5.0.2" export FIP_RANGE="172.17.107.200:172.17.107.249" :returns: Network environment variables :rtype: dict """ - # Handle backward compatibile OSCI enviornment variables + # Handle OSCI environment variables + # Note: TEST_* is the only prefix honored _vars = {} - _vars['net_id'] = os.environ.get('NET_ID') - _vars['external_dns'] = os.environ.get('NAMESERVER') - _vars['default_gateway'] = os.environ.get('GATEWAY') - _vars['external_net_cidr'] = os.environ.get('CIDR_EXT') + _vars['net_id'] = os.environ.get('TEST_NET_ID') + _vars['external_dns'] = os.environ.get('TEST_NAME_SERVER') + _vars['default_gateway'] = os.environ.get('TEST_GATEWAY') + _vars['external_net_cidr'] = os.environ.get('TEST_CIDR_EXT') # Take FIP_RANGE and create start and end floating ips - _fip_range = os.environ.get('FIP_RANGE') - if _fip_range and ':' in _fip_range: - _vars['start_floating_ip'] = os.environ.get('FIP_RANGE').split(':')[0] - _vars['end_floating_ip'] = os.environ.get('FIP_RANGE').split(':')[1] + _fip_range = os.environ.get('TEST_FIP_RANGE') + if _fip_range is not None and ':' in _fip_range: + _vars['start_floating_ip'] = os.environ.get( + 'TEST_FIP_RANGE').split(':')[0] + _vars['end_floating_ip'] = os.environ.get( + 'TEST_FIP_RANGE').split(':')[1] - # Env var naming consistent with zaza.openstack.configure.network - # functions takes priority. Override backward compatible settings. + # zaza.openstack.configure.network functions variables still take priority + # for local testing. Override OSCI settings. _keys = ['default_gateway', 'start_floating_ip', 'end_floating_ip', @@ -177,230 +234,6 @@ def get_yaml_config(config_file): return yaml.safe_load(open(config_file, 'r').read()) -def series_upgrade_non_leaders_first(application, from_series="trusty", - to_series="xenial", - completed_machines=[]): - """Series upgrade non leaders first. - - Wrap all the functionality to handle series upgrade for charms - which must have non leaders upgraded first. - - :param application: Name of application to upgrade series - :type application: str - :param from_series: The series from which to upgrade - :type from_series: str - :param to_series: The series to which to upgrade - :type to_series: str - :param completed_machines: List of completed machines which do no longer - require series upgrade. - :type completed_machines: list - :returns: None - :rtype: None - """ - status = model.get_status().applications[application] - leader = None - non_leaders = [] - for unit in status["units"]: - if status["units"][unit].get("leader"): - leader = unit - else: - non_leaders.append(unit) - - # Series upgrade the non-leaders first - for unit in non_leaders: - machine = status["units"][unit]["machine"] - if machine not in completed_machines: - logging.info("Series upgrade non-leader unit: {}" - .format(unit)) - series_upgrade(unit, machine, - from_series=from_series, to_series=to_series, - origin=None) - completed_machines.append(machine) - else: - logging.info("Skipping unit: {}. Machine: {} already upgraded. " - .format(unit, machine, application)) - model.block_until_all_units_idle() - - # Series upgrade the leader - machine = status["units"][leader]["machine"] - logging.info("Series upgrade leader: {}".format(leader)) - if machine not in completed_machines: - series_upgrade(leader, machine, - from_series=from_series, to_series=to_series, - origin=None) - completed_machines.append(machine) - else: - logging.info("Skipping unit: {}. Machine: {} already upgraded." - .format(unit, machine, application)) - model.block_until_all_units_idle() - - -def series_upgrade_application(application, pause_non_leader_primary=True, - pause_non_leader_subordinate=True, - from_series="trusty", to_series="xenial", - origin='openstack-origin', - completed_machines=[], - files=None, workaround_script=None): - """Series upgrade application. - - Wrap all the functionality to handle series upgrade for a given - application. Including pausing non-leader units. - - :param application: Name of application to upgrade series - :type application: str - :param pause_non_leader_primary: Whether the non-leader applications should - be paused - :type pause_non_leader_primary: bool - :param pause_non_leader_subordinate: Whether the non-leader subordinate - hacluster applications should be - paused - :type pause_non_leader_subordinate: bool - :param from_series: The series from which to upgrade - :type from_series: str - :param to_series: The series to which to upgrade - :type to_series: str - :param origin: The configuration setting variable name for changing origin - source. (openstack-origin or source) - :type origin: str - :param completed_machines: List of completed machines which do no longer - require series upgrade. - :type completed_machines: list - :param files: Workaround files to scp to unit under upgrade - :type files: list - :param workaround_script: Workaround script to run during series upgrade - :type workaround_script: str - :returns: None - :rtype: None - """ - status = model.get_status().applications[application] - - # For some applications (percona-cluster) the leader unit must upgrade - # first. For API applications the non-leader haclusters must be paused - # before upgrade. Finally, for some applications this is arbitrary but - # generalized. - leader = None - non_leaders = [] - for unit in status["units"]: - if status["units"][unit].get("leader"): - leader = unit - else: - non_leaders.append(unit) - - # Pause the non-leaders - for unit in non_leaders: - if pause_non_leader_subordinate: - if status["units"][unit].get("subordinates"): - for subordinate in status["units"][unit]["subordinates"]: - _app = subordinate.split('/')[0] - if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST: - logging.info("Skipping pausing {} - blacklisted" - .format(subordinate)) - else: - logging.info("Pausing {}".format(subordinate)) - model.run_action( - subordinate, "pause", action_params={}) - if pause_non_leader_primary: - logging.info("Pausing {}".format(unit)) - model.run_action(unit, "pause", action_params={}) - - machine = status["units"][leader]["machine"] - # Series upgrade the leader - logging.info("Series upgrade leader: {}".format(leader)) - if machine not in completed_machines: - series_upgrade(leader, machine, - from_series=from_series, to_series=to_series, - origin=origin, workaround_script=workaround_script, - files=files) - completed_machines.append(machine) - else: - logging.info("Skipping unit: {}. Machine: {} already upgraded." - "But setting origin on the application {}" - .format(unit, machine, application)) - logging.info("Set origin on {}".format(application)) - set_origin(application, origin) - model.block_until_all_units_idle() - - # Series upgrade the non-leaders - for unit in non_leaders: - machine = status["units"][unit]["machine"] - if machine not in completed_machines: - logging.info("Series upgrade non-leader unit: {}" - .format(unit)) - series_upgrade(unit, machine, - from_series=from_series, to_series=to_series, - origin=origin, workaround_script=workaround_script, - files=files) - completed_machines.append(machine) - else: - logging.info("Skipping unit: {}. Machine: {} already upgraded. " - "But setting origin on the application {}" - .format(unit, machine, application)) - logging.info("Set origin on {}".format(application)) - set_origin(application, origin) - model.block_until_all_units_idle() - - -def series_upgrade(unit_name, machine_num, - from_series="trusty", to_series="xenial", - origin='openstack-origin', - files=None, workaround_script=None): - """Perform series upgrade on a unit. - - :param unit_name: Unit Name - :type unit_name: str - :param machine_num: Machine number - :type machine_num: str - :param from_series: The series from which to upgrade - :type from_series: str - :param to_series: The series to which to upgrade - :type to_series: str - :param origin: The configuration setting variable name for changing origin - source. (openstack-origin or source) - :type origin: str - :param files: Workaround files to scp to unit under upgrade - :type files: list - :param workaround_script: Workaround script to run during series upgrade - :type workaround_script: str - :returns: None - :rtype: None - """ - logging.info("Series upgrade {}".format(unit_name)) - application = unit_name.split('/')[0] - set_dpkg_non_interactive_on_unit(unit_name) - logging.info("Prepare series upgrade on {}".format(machine_num)) - model.prepare_series_upgrade(machine_num, to_series=to_series) - logging.info("Waiting for workload status 'blocked' on {}" - .format(unit_name)) - model.block_until_unit_wl_status(unit_name, "blocked") - logging.info("Waiting for model idleness") - model.block_until_all_units_idle() - wrap_do_release_upgrade(unit_name, from_series=from_series, - to_series=to_series, files=files, - workaround_script=workaround_script) - logging.info("Reboot {}".format(unit_name)) - reboot(unit_name) - logging.info("Waiting for workload status 'blocked' on {}" - .format(unit_name)) - model.block_until_unit_wl_status(unit_name, "blocked") - logging.info("Waiting for model idleness") - model.block_until_all_units_idle() - logging.info("Set origin on {}".format(application)) - # Allow for charms which have neither source nor openstack-origin - if origin: - set_origin(application, origin) - model.block_until_all_units_idle() - logging.info("Complete series upgrade on {}".format(machine_num)) - model.complete_series_upgrade(machine_num) - model.block_until_all_units_idle() - logging.info("Waiting for workload status 'active' on {}" - .format(unit_name)) - model.block_until_unit_wl_status(unit_name, "active") - model.block_until_all_units_idle() - # This step may be performed by juju in the future - logging.info("Set series on {} to {}".format(application, to_series)) - model.set_series(application, to_series) - - def set_origin(application, origin='openstack-origin', pocket='distro'): """Set the configuration option for origin source. @@ -419,44 +252,23 @@ def set_origin(application, origin='openstack-origin', pocket='distro'): model.set_application_config(application, {origin: pocket}) -def wrap_do_release_upgrade(unit_name, from_series="trusty", - to_series="xenial", - files=None, workaround_script=None): - """Wrap do release upgrade. +async def async_set_origin(application, origin='openstack-origin', + pocket='distro'): + """Set the configuration option for origin source. - In a production environment this step would be run administratively. - For testing purposes we need this automated. - - :param unit_name: Unit Name - :type unit_name: str - :param from_series: The series from which to upgrade - :type from_series: str - :param to_series: The series to which to upgrade - :type to_series: str - :param files: Workaround files to scp to unit under upgrade - :type files: list - :param workaround_script: Workaround script to run during series upgrade - :type workaround_script: str + :param application: Name of application to upgrade series + :type application: str + :param origin: The configuration setting variable name for changing origin + source. (openstack-origin or source) + :type origin: str + :param pocket: Origin source cloud pocket. + i.e. 'distro' or 'cloud:xenial-newton' + :type pocket: str :returns: None :rtype: None """ - # Pre upgrade hacks - # There are a few necessary hacks to accomplish an automated upgrade - # to overcome some packaging bugs. - # Copy scripts - if files: - logging.info("SCP files") - for _file in files: - logging.info("SCP {}".format(_file)) - model.scp_to_unit(unit_name, _file, os.path.basename(_file)) - - # Run Script - if workaround_script: - logging.info("Running workaround script") - run_via_ssh(unit_name, workaround_script) - - # Actually do the do_release_upgrade - do_release_upgrade(unit_name) + logging.info("Set origin on {} to {}".format(application, origin)) + await model.async_set_application_config(application, {origin: pocket}) def run_via_ssh(unit_name, cmd): @@ -481,24 +293,53 @@ def run_via_ssh(unit_name, cmd): logging.warn(e) -def do_release_upgrade(unit_name): - """Run do-release-upgrade noninteractive. +async def async_run_via_ssh(unit_name, cmd, raise_exceptions=False): + """Run command on unit via ssh. + + For executing commands on units when the juju agent is down. :param unit_name: Unit Name - :type unit_name: str + :param cmd: Command to execute on remote unit + :type cmd: str :returns: None :rtype: None """ - logging.info('Upgrading ' + unit_name) - # NOTE: It is necessary to run this via juju ssh rather than juju run due - # to timeout restrictions and error handling. - cmd = ['juju', 'ssh', unit_name, 'sudo', 'DEBIAN_FRONTEND=noninteractive', - 'do-release-upgrade', '-f', 'DistUpgradeViewNonInteractive'] + if "sudo" not in cmd: + # cmd.insert(0, "sudo") + cmd = "sudo {}".format(cmd) + cmd = ['juju', 'ssh', unit_name, cmd] try: - subprocess.check_call(cmd) + await check_call(cmd) except subprocess.CalledProcessError as e: - logging.warn("Failed do-release-upgrade for {}".format(unit_name)) + logging.warn("Failed command {} on {}".format(cmd, unit_name)) logging.warn(e) + if raise_exceptions: + raise e + + +def check_commands_on_units(commands, units): + """Check that all commands in a list exit zero on all units in a list. + + :param commands: list of bash commands + :param units: list of unit pointers + :returns: None if successful; Failure message otherwise + """ + logging.debug('Checking exit codes for {} commands on {} ' + 'units...'.format(len(commands), + len(units))) + + for u in units: + for cmd in commands: + output = model.run_on_unit(u.entity_id, cmd) + if int(output['Code']) == 0: + logging.debug('{} `{}` returned {} ' + '(OK)'.format(u.entity_id, + cmd, output['Code'])) + else: + return ('{} `{}` returned {} ' + '{}'.format(u.entity_id, + cmd, output['Code'], output)) + return None def reboot(unit_name): @@ -519,6 +360,45 @@ def reboot(unit_name): pass +async def async_reboot(unit_name): + """Reboot unit. + + :param unit_name: Unit Name + :type unit_name: str + :returns: None + :rtype: None + """ + # NOTE: When used with series upgrade the agent will be down. + # Even juju run will not work + await async_run_via_ssh(unit_name, "sudo reboot && exit") + + +async def check_call(cmd): + """Asynchronous function to check a subprocess call. + + :param cmd: Command to execute + :type cmd: List[str] + :returns: None + :rtype: None + """ + proc = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE) + stdout, stderr = await proc.communicate() + stdout = stdout.decode('utf-8') + stderr = stderr.decode('utf-8') + if proc.returncode != 0: + logging.warn("STDOUT: {}".format(stdout)) + logging.warn("STDERR: {}".format(stderr)) + raise subprocess.CalledProcessError(proc.returncode, cmd) + else: + if stderr: + logging.info("STDERR: {} ({})".format(stderr, ' '.join(cmd))) + if stdout: + logging.info("STDOUT: {} ({})".format(stdout, ' '.join(cmd))) + + def set_dpkg_non_interactive_on_unit( unit_name, apt_conf_d="/etc/apt/apt.conf.d/50unattended-upgrades"): """Set dpkg options on unit. @@ -535,6 +415,22 @@ def set_dpkg_non_interactive_on_unit( model.run_on_unit(unit_name, cmd) +async def async_set_dpkg_non_interactive_on_unit( + unit_name, apt_conf_d="/etc/apt/apt.conf.d/50unattended-upgrades"): + """Set dpkg options on unit. + + :param unit_name: Unit Name + :type unit_name: str + :param apt_conf_d: Apt.conf file to update + :type apt_conf_d: str + """ + DPKG_NON_INTERACTIVE = 'DPkg::options { "--force-confdef"; };' + # Check if the option exists. If not, add it to the apt.conf.d file + cmd = ("grep '{option}' {file_name} || echo '{option}' >> {file_name}" + .format(option=DPKG_NON_INTERACTIVE, file_name=apt_conf_d)) + await model.async_run_on_unit(unit_name, cmd) + + def get_process_id_list(unit_name, process_name, expect_success=True): """Get a list of process ID(s). @@ -684,26 +580,129 @@ def get_ubuntu_release(ubuntu_name): return index +def get_file_contents(unit, f): + """Get contents of a file on a remote unit.""" + return model.run_on_unit(unit.entity_id, + "cat {}".format(f))['Stdout'] + + def is_port_open(port, address): - """Determine if TCP port is accessible. + """Determine if TCP port is accessible. - Connect to the MySQL port on the VIP. + Connect to the MySQL port on the VIP. - :param port: Port number - :type port: str - :param address: IP address - :type port: str - :returns: True if port is reachable - :rtype: boolean - """ - try: - telnetlib.Telnet(address, port) - return True - except socket.error as e: - if e.errno == 113: - logging.error("could not connect to {}:{}" - .format(address, port)) - if e.errno == 111: - logging.error("connection refused connecting" - " to {}:{}".format(address, port)) - return False + :param port: Port number + :type port: str + :param address: IP address + :type port: str + :returns: True if port is reachable + :rtype: boolean + """ + try: + telnetlib.Telnet(address, port) + return True + except socket.error as e: + if e.errno == 113: + logging.error("could not connect to {}:{}" + .format(address, port)) + if e.errno == 111: + logging.error("connection refused connecting" + " to {}:{}".format(address, port)) + return False + + +def port_knock_units(units, port=22, expect_success=True): + """Check if specific port is open on units. + + Open a TCP socket to check for a listening sevice on each listed juju unit. + :param units: list of unit pointers + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :expect_success: True by default, set False to invert logic + :returns: None if successful, Failure message otherwise + """ + for u in units: + host = u.public_address + connected = is_port_open(port, host) + if not connected and expect_success: + return 'Socket connect failed.' + elif connected and not expect_success: + return 'Socket connected unexpectedly.' + + +def get_series(unit): + """Ubuntu release name running on unit.""" + result = model.run_on_unit(unit.entity_id, + "lsb_release -cs") + return result['Stdout'].strip() + + +def systemctl(unit, service, command="restart"): + """Run systemctl command on a unit. + + :param unit: Unit object or unit name + :type unit: Union[Unit,string] + :param service: Name of service to act on + :type service: string + :param command: Name of command. i.e. start, stop, restart + :type command: string + :raises: AssertionError if the command is unsuccessful + :returns: None if successful + """ + cmd = "/bin/systemctl {} {}".format(command, service) + + # Check if this is a unit object or string name of a unit + try: + unit.entity_id + except AttributeError: + unit = model.get_unit_from_name(unit) + + result = model.run_on_unit( + unit.entity_id, cmd) + assert int(result['Code']) == 0, ( + "{} of {} on {} failed".format(command, service, unit.entity_id)) + + +def get_mojo_cacert_path(): + """Retrieve cacert from Mojo storage location. + + :returns: Path to cacert + :rtype: str + :raises: zaza_exceptions.CACERTNotFound + :raises: :class:`zaza_exceptions.CACERTNotfound` + """ + try: + cert_dir = os.environ['MOJO_LOCAL_DIR'] + except KeyError: + raise zaza_exceptions.CACERTNotFound( + "Could not find cacert.pem, MOJO_LOCAL_DIR unset") + cacert = os.path.join(cert_dir, 'cacert.pem') + if os.path.exists(cacert): + return cacert + else: + raise zaza_exceptions.CACERTNotFound("Could not find cacert.pem") + + +def attach_file_resource(application_name, resource_name, + file_content, file_suffix=".txt"): + """Attaches a file as a Juju resource given the file content and suffix. + + The file content will be written into a temporary file with the given + suffix, and it will be attached to the Juju application. + + :param application_name: Juju application name. + :type application_name: string + :param resource_name: Juju resource name. + :type resource_name: string + :param file_content: The content of the file that will be attached + :type file_content: string + :param file_suffix: File suffix. This should be used to set the file + extension for applications that are sensitive to this. + :type file_suffix: string + :returns: None + """ + with tempfile.NamedTemporaryFile(mode='w', suffix=file_suffix) as fp: + fp.write(file_content) + fp.flush() + model.attach_resource( + application_name, resource_name, fp.name) diff --git a/zaza/openstack/utilities/juju.py b/zaza/openstack/utilities/juju.py index 0c46b7e..073a26f 100644 --- a/zaza/openstack/utilities/juju.py +++ b/zaza/openstack/utilities/juju.py @@ -13,40 +13,66 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Module for interacting with juju.""" -import os -from pathlib import Path -import yaml +"""Deprecated, please use zaza.utilities.juju.""" -from zaza import ( - model, - controller, -) -from zaza.openstack.utilities import generic as generic_utils +import logging +import functools + +import zaza.utilities.juju -def get_application_status(application=None, unit=None): +def deprecate(): + """Add a deprecation warning to wrapped function.""" + def wrap(f): + + @functools.wraps(f) + def wrapped_f(*args, **kwargs): + msg = ( + "{} from zaza.openstack.utilities.juju is deprecated. " + "Please use the equivalent from zaza.utilities.juju".format( + f.__name__)) + logging.warning(msg) + return f(*args, **kwargs) + return wrapped_f + return wrap + + +@deprecate() +def get_application_status(application=None, unit=None, model_name=None): """Return the juju status for an application. :param application: Application name :type application: string :param unit: Specific unit :type unit: string + :param model_name: Name of model to query. + :type model_name: str :returns: Juju status output for an application :rtype: dict """ - status = get_full_juju_status() - - if unit and not application: - application = unit.split("/")[0] - - if application: - status = status.applications.get(application) - if unit: - status = status.get("units").get(unit) - return status + return zaza.utilities.juju.get_application_status( + application=application, + unit=unit, + model_name=model_name) +@deprecate() +def get_application_ip(application, model_name=None): + """Get the application's IP address. + + :param application: Application name + :type application: str + :param model_name: Name of model to query. + :type model_name: str + :returns: Application's IP address + :rtype: str + """ + return zaza.utilities.juju.get_application_ip( + application, + model_name=model_name) + + +@deprecate() def get_cloud_configs(cloud=None): """Get cloud configuration from local clouds.yaml. @@ -58,127 +84,119 @@ def get_cloud_configs(cloud=None): :returns: Dictionary of cloud configuration :rtype: dict """ - home = str(Path.home()) - cloud_config = os.path.join(home, ".local", "share", "juju", "clouds.yaml") - if cloud: - return generic_utils.get_yaml_config(cloud_config)["clouds"].get(cloud) - else: - return generic_utils.get_yaml_config(cloud_config) + return zaza.utilities.juju.get_cloud_configs( + cloud=cloud) -def get_full_juju_status(): +@deprecate() +def get_full_juju_status(model_name=None): """Return the full juju status output. + :param model_name: Name of model to query. + :type model_name: str :returns: Full juju status output :rtype: dict """ - status = model.get_status() - return status + return zaza.utilities.juju.get_full_juju_status( + model_name=model_name) -def get_machines_for_application(application): +@deprecate() +def get_machines_for_application(application, model_name=None): """Return machines for a given application. :param application: Application name :type application: string - :returns: List of machines for an application - :rtype: list + :param model_name: Name of model to query. + :type model_name: str + :returns: machines for an application + :rtype: Iterator[str] """ - status = get_application_status(application) - - # libjuju juju status no longer has units for subordinate charms - # Use the application it is subordinate-to to find machines - if status.get("units") is None and status.get("subordinate-to"): - return get_machines_for_application(status.get("subordinate-to")[0]) - - machines = [] - for unit in status.get("units").keys(): - machines.append( - status.get("units").get(unit).get("machine")) - return machines + return zaza.utilities.juju.get_machines_for_application( + application, + model_name=model_name) -def get_unit_name_from_host_name(host_name, application): +@deprecate() +def get_unit_name_from_host_name(host_name, application, model_name=None): """Return the juju unit name corresponding to a hostname. :param host_name: Host name to map to unit name. :type host_name: string :param application: Application name :type application: string + :param model_name: Name of model to query. + :type model_name: str """ - # Assume that a juju managed hostname always ends in the machine number. - machine_number = host_name.split('-')[-1] - unit_names = [ - u.entity_id - for u in model.get_units(application_name=application) - if int(u.data['machine-id']) == int(machine_number)] - return unit_names[0] + return zaza.utilities.juju.get_unit_name_from_host_name( + host_name, + application, + model_name=model_name) -def get_machine_status(machine, key=None): +@deprecate() +def get_machine_status(machine, key=None, model_name=None): """Return the juju status for a machine. :param machine: Machine number :type machine: string :param key: Key option requested :type key: string + :param model_name: Name of model to query. + :type model_name: str :returns: Juju status output for a machine :rtype: dict """ - status = get_full_juju_status() - status = status.machines.get(machine) - if key: - status = status.get(key) - return status + return zaza.utilities.juju.get_machine_status( + machine, + key=key, + model_name=model_name) -def get_machine_series(machine): +@deprecate() +def get_machine_series(machine, model_name=None): """Return the juju series for a machine. :param machine: Machine number :type machine: string + :param model_name: Name of model to query. + :type model_name: str :returns: Juju series :rtype: string """ - return get_machine_status( - machine=machine, - key='series' - ) + return zaza.utilities.juju.get_machine_series( + machine, + model_name=model_name) -def get_machine_uuids_for_application(application): +@deprecate() +def get_machine_uuids_for_application(application, model_name=None): """Return machine uuids for a given application. :param application: Application name :type application: string - :returns: List of machine uuuids for an application - :rtype: list + :param model_name: Name of model to query. + :type model_name: str + :returns: machine uuuids for an application + :rtype: Iterator[str] """ - uuids = [] - for machine in get_machines_for_application(application): - uuids.append(get_machine_status(machine, key="instance-id")) - return uuids + return zaza.utilities.juju.get_machine_uuids_for_application( + application, + model_name=model_name) +@deprecate() def get_provider_type(): """Get the type of the undercloud. :returns: Name of the undercloud type :rtype: string """ - cloud = controller.get_cloud() - if cloud: - # If the controller was deployed from this system with - # the cloud configured in ~/.local/share/juju/clouds.yaml - # Determine the cloud type directly - return get_cloud_configs(cloud)["type"] - else: - # If the controller was deployed elsewhere - # For now assume openstack - return "openstack" + return zaza.utilities.juju.get_provider_type() -def remote_run(unit, remote_cmd, timeout=None, fatal=None): +@deprecate() +def remote_run(unit, remote_cmd, timeout=None, fatal=None, model_name=None): """Run command on unit and return the output. NOTE: This function is pre-deprecated. As soon as libjuju unit.run is able @@ -190,43 +208,23 @@ def remote_run(unit, remote_cmd, timeout=None, fatal=None): :type arg: int :param fatal: Command failure condidered fatal or not :type fatal: boolean + :param model_name: Name of model to query. + :type model_name: str :returns: Juju run output :rtype: string :raises: model.CommandRunFailed """ - if fatal is None: - fatal = True - result = model.run_on_unit(unit, remote_cmd, timeout=timeout) - if result: - if int(result.get("Code")) == 0: - return result.get("Stdout") - else: - if fatal: - raise model.CommandRunFailed(remote_cmd, result) - return result.get("Stderr") + return zaza.utilities.juju.remote_run( + unit, + remote_cmd, + timeout=timeout, + fatal=fatal, + model_name=model_name) -def _get_unit_names(names): - """Resolve given application names to first unit name of said application. - - Helper function that resolves application names to first unit name of - said application. Any already resolved unit names are returned as-is. - - :param names: List of units/applications to translate - :type names: list(str) - :returns: List of units - :rtype: list(str) - """ - result = [] - for name in names: - if '/' in name: - result.append(name) - else: - result.append(model.get_first_unit_name(name)) - return result - - -def get_relation_from_unit(entity, remote_entity, remote_interface_name): +@deprecate() +def get_relation_from_unit(entity, remote_entity, remote_interface_name, + model_name=None): """Get relation data passed between two units. Get relation data for relation with `remote_interface_name` between @@ -243,35 +241,72 @@ def get_relation_from_unit(entity, remote_entity, remote_interface_name): :param remote_interface_name: Name of interface to query on remote end of relation :type remote_interface_name: str + :param model_name: Name of model to query. + :type model_name: str :returns: dict with relation data :rtype: dict :raises: model.CommandRunFailed """ - application = entity.split('/')[0] - remote_application = remote_entity.split('/')[0] - rid = model.get_relation_id(application, remote_application, - remote_interface_name=remote_interface_name) - (unit, remote_unit) = _get_unit_names([entity, remote_entity]) - cmd = 'relation-get --format=yaml -r "{}" - "{}"' .format(rid, remote_unit) - result = model.run_on_unit(unit, cmd) - if result and int(result.get('Code')) == 0: - return yaml.safe_load(result.get('Stdout')) - else: - raise model.CommandRunFailed(cmd, result) + return zaza.utilities.juju.get_relation_from_unit( + entity, + remote_entity, + remote_interface_name, + model_name=model_name) -def leader_get(application, key=''): +@deprecate() +def leader_get(application, key='', model_name=None): """Get leader settings from leader unit of named application. :param application: Application to get leader settings from. :type application: str + :param key: Key option requested + :type key: string + :param model_name: Name of model to query. + :type model_name: str :returns: dict with leader settings :rtype: dict :raises: model.CommandRunFailed """ - cmd = 'leader-get --format=yaml {}'.format(key) - result = model.run_on_leader(application, cmd) - if result and int(result.get('Code')) == 0: - return yaml.safe_load(result.get('Stdout')) - else: - raise model.CommandRunFailed(cmd, result) + return zaza.utilities.juju.leader_get( + application, + key=key, + model_name=model_name) + + +@deprecate() +def get_subordinate_units(unit_list, charm_name=None, status=None, + model_name=None): + """Get a list of all subordinate units associated with units in unit_list. + + Get a list of all subordinate units associated with units in unit_list. + Subordinate can be filtered by using 'charm_name' which will only return + subordinate units which have 'charm_name' in the name of the charm e.g. + + get_subordinate_units( + ['cinder/1']) would return ['cinder-hacluster/1', + 'cinder-ceph/2']) + where as + + get_subordinate_units( + ['cinder/1'], charm_name='hac') would return ['cinder-hacluster/1'] + + NOTE: The charm_name match is against the name of the charm not the + application name. + + :param charm_name: List of unit names + :type unit_list: [] + :param charm_name: charm_name to match against, can be a sub-string. + :type charm_name: str + :param status: Juju status to query against, + :type status: juju.client._definitions.FullStatus + :param model_name: Name of model to query. + :type model_name: str + :returns: List of matching unit names. + :rtype: [] + """ + return zaza.utilities.juju.get_subordinate_units( + unit_list, + charm_name=charm_name, + status=status, + model_name=model_name) diff --git a/zaza/openstack/utilities/openstack.py b/zaza/openstack/utilities/openstack.py index 6769de6..3082068 100644 --- a/zaza/openstack/utilities/openstack.py +++ b/zaza/openstack/utilities/openstack.py @@ -14,19 +14,44 @@ """Module for interacting with OpenStack. -This module contains a number of functions for interacting with Openstack. +This module contains a number of functions for interacting with OpenStack. """ +import collections +import copy +import datetime +import enum +import io +import itertools +import juju_wait +import logging +import os +import paramiko +import re +import shutil +import six +import subprocess +import sys +import tempfile +import tenacity +import textwrap +import urllib + + from .os_versions import ( OPENSTACK_CODENAMES, SWIFT_CODENAMES, + OVN_CODENAMES, PACKAGE_CODENAMES, OPENSTACK_RELEASES_PAIRS, ) from openstack import connection +from aodhclient.v2 import client as aodh_client from cinderclient import client as cinderclient +from heatclient import client as heatclient from glanceclient import Client as GlanceClient +from designateclient.client import Client as DesignateClient from keystoneclient.v2_0 import client as keystoneclient_v2 from keystoneclient.v3 import client as keystoneclient_v3 @@ -36,38 +61,35 @@ from keystoneauth1.identity import ( v2, ) import zaza.openstack.utilities.cert as cert +import zaza.utilities.deployment_env as deployment_env +import zaza.utilities.juju as juju_utils +import zaza.utilities.maas from novaclient import client as novaclient_client from neutronclient.v2_0 import client as neutronclient from neutronclient.common import exceptions as neutronexceptions from octaviaclient.api.v2 import octavia as octaviaclient from swiftclient import client as swiftclient +from manilaclient import client as manilaclient -import datetime -import io -import juju_wait -import logging -import os -import paramiko -import re -import six -import subprocess -import sys -import tempfile -import tenacity -import urllib +from juju.errors import JujuError + +import zaza from zaza import model from zaza.openstack.utilities import ( exceptions, generic as generic_utils, - juju as juju_utils, ) +import zaza.utilities.networking as network_utils + CIRROS_RELEASE_URL = 'http://download.cirros-cloud.net/version/released' CIRROS_IMAGE_URL = 'http://download.cirros-cloud.net' UBUNTU_IMAGE_URLS = { 'bionic': ('http://cloud-images.ubuntu.com/{release}/current/' - '{release}-server-cloudimg-{arch}.img') + '{release}-server-cloudimg-{arch}.img'), + 'focal': ('http://cloud-images.ubuntu.com/{release}/current/' + '{release}-server-cloudimg-{arch}.img'), } CHARM_TYPES = { @@ -99,18 +121,43 @@ CHARM_TYPES = { 'pkg': 'ceilometer-common', 'origin_setting': 'openstack-origin' }, + 'designate': { + 'pkg': 'designate-common', + 'origin_setting': 'openstack-origin' + }, + 'ovn-central': { + 'pkg': 'ovn-common', + 'origin_setting': 'source' + }, + 'ceph-mon': { + 'pkg': 'ceph-common', + 'origin_setting': 'source' + }, + 'placement': { + 'pkg': 'placement-common', + 'origin_setting': 'openstack-origin' + }, } + +# Older tests use the order the services appear in the list to imply +# the order they should be upgraded in. This approach has been superceded and +# zaza.openstack.utilities.openstack_upgrade.get_upgrade_groups should be used +# instead. UPGRADE_SERVICES = [ {'name': 'keystone', 'type': CHARM_TYPES['keystone']}, - {'name': 'nova-cloud-controller', 'type': CHARM_TYPES['nova']}, - {'name': 'nova-compute', 'type': CHARM_TYPES['nova']}, {'name': 'neutron-api', 'type': CHARM_TYPES['neutron']}, - {'name': 'neutron-gateway', 'type': CHARM_TYPES['neutron']}, + {'name': 'nova-cloud-controller', 'type': CHARM_TYPES['nova']}, {'name': 'glance', 'type': CHARM_TYPES['glance']}, {'name': 'cinder', 'type': CHARM_TYPES['cinder']}, + {'name': 'neutron-gateway', 'type': CHARM_TYPES['neutron']}, + {'name': 'ceilometer', 'type': CHARM_TYPES['ceilometer']}, + {'name': 'designate', 'type': CHARM_TYPES['designate']}, + {'name': 'nova-compute', 'type': CHARM_TYPES['nova']}, {'name': 'openstack-dashboard', 'type': CHARM_TYPES['openstack-dashboard']}, - {'name': 'ceilometer', 'type': CHARM_TYPES['ceilometer']}, + {'name': 'ovn-central', 'type': CHARM_TYPES['ovn-central']}, + {'name': 'ceph-mon', 'type': CHARM_TYPES['ceph-mon']}, + {'name': 'placement', 'type': CHARM_TYPES['placement']}, ] @@ -137,17 +184,90 @@ WORKLOAD_STATUS_EXCEPTIONS = { 'ceilometer and gnocchi')}} # For vault TLS certificates +CACERT_FILENAME_FORMAT = "{}_juju_ca_cert.crt" +CERT_PROVIDERS = ['vault'] +REMOTE_CERT_DIR = "/usr/local/share/ca-certificates" KEYSTONE_CACERT = "keystone_juju_ca_cert.crt" KEYSTONE_REMOTE_CACERT = ( "/usr/local/share/ca-certificates/{}".format(KEYSTONE_CACERT)) -KEYSTONE_LOCAL_CACERT = ("/tmp/{}".format(KEYSTONE_CACERT)) -# Openstack Client helpers +async def async_block_until_ca_exists(application_name, ca_cert, + model_name=None, timeout=2700): + """Block until a CA cert is on all units of application_name. + + :param application_name: Name of application to check + :type application_name: str + :param ca_cert: The certificate content. + :type ca_cert: str + :param model_name: Name of model to query. + :type model_name: str + :param timeout: How long in seconds to wait + :type timeout: int + """ + async def _check_ca_present(model, ca_files): + units = model.applications[application_name].units + for ca_file in ca_files: + for unit in units: + try: + output = await unit.run('cat {}'.format(ca_file)) + contents = output.data.get('results').get('Stdout', '') + if ca_cert not in contents: + break + # libjuju throws a generic error for connection failure. So we + # cannot differentiate between a connectivity issue and a + # target file not existing error. For now just assume the + # latter. + except JujuError: + break + else: + # The CA was found in `ca_file` on all units. + return True + else: + return False + ca_files = await _async_get_remote_ca_cert_file_candidates( + application_name, + model_name=model_name) + async with zaza.model.run_in_model(model_name) as model: + await zaza.model.async_block_until( + lambda: _check_ca_present(model, ca_files), timeout=timeout) + +block_until_ca_exists = zaza.model.sync_wrapper(async_block_until_ca_exists) + + +def get_cacert_absolute_path(filename): + """Build string containing location of the CA Certificate file. + + :param filename: Expected filename for CA Certificate file. + :type filename: str + :returns: Absolute path to file containing CA Certificate + :rtype: str + """ + return os.path.join( + deployment_env.get_tmpdir(), filename) + + +def get_cacert(): + """Return path to CA Certificate bundle for verification during test. + + :returns: Path to CA Certificate bundle or None. + :rtype: Union[str, None] + """ + for _provider in CERT_PROVIDERS: + _cert = get_cacert_absolute_path( + CACERT_FILENAME_FORMAT.format(_provider)) + if os.path.exists(_cert): + return _cert + _keystone_local_cacert = get_cacert_absolute_path(KEYSTONE_CACERT) + if os.path.exists(_keystone_local_cacert): + return _keystone_local_cacert + + +# OpenStack Client helpers def get_ks_creds(cloud_creds, scope='PROJECT'): """Return the credentials for authenticating against keystone. - :param cloud_creds: Openstack RC environment credentials + :param cloud_creds: OpenStack RC environment credentials :type cloud_creds: dict :param scope: Authentication scope: PROJECT or DOMAIN :type scope: string @@ -194,15 +314,29 @@ def get_glance_session_client(session): return GlanceClient('2', session=session) -def get_nova_session_client(session): +def get_designate_session_client(**kwargs): + """Return designateclient authenticated by keystone session. + + :param kwargs: Designate Client Arguments + :returns: Authenticated designateclient + :rtype: DesignateClient + """ + version = kwargs.pop('version', None) or 2 + return DesignateClient(version=str(version), + **kwargs) + + +def get_nova_session_client(session, version=2): """Return novaclient authenticated by keystone session. :param session: Keystone session object :type session: keystoneauth1.session.Session object + :param version: Version of client to request. + :type version: float :returns: Authenticated novaclient :rtype: novaclient.Client object """ - return novaclient_client.Client(2, session=session) + return novaclient_client.Client(version, session=session) def get_neutron_session_client(session): @@ -217,18 +351,22 @@ def get_neutron_session_client(session): def get_swift_session_client(session, - region_name='RegionOne'): + region_name='RegionOne', + cacert=None): """Return swiftclient authenticated by keystone session. :param session: Keystone session object :type session: keystoneauth1.session.Session object :param region_name: Optional region name to use :type region_name: str + :param cacert: Path to CA Certificate + :type cacert: Optional[str] :returns: Authenticated swiftclient :rtype: swiftclient.Client object """ return swiftclient.Connection(session=session, - os_options={'region_name': region_name}) + os_options={'region_name': region_name}, + cacert=cacert) def get_octavia_session_client(session, service_type='load-balancer', @@ -256,6 +394,19 @@ def get_octavia_session_client(session, service_type='load-balancer', endpoint=endpoint.url) +def get_heat_session_client(session, version=1): + """Return heatclient authenticated by keystone session. + + :param session: Keystone session object + :type session: keystoneauth1.session.Session object + :param version: Heat API version + :type version: int + :returns: Authenticated cinderclient + :rtype: heatclient.Client object + """ + return heatclient.Client(session=session, version=version) + + def get_cinder_session_client(session, version=2): """Return cinderclient authenticated by keystone session. @@ -288,13 +439,40 @@ def get_masakari_session_client(session, interface='internal', return conn.instance_ha -def get_keystone_scope(): +def get_aodh_session_client(session): + """Return aodh client authenticated by keystone session. + + :param session: Keystone session object + :type session: keystoneauth1.session.Session object + :returns: Authenticated aodh client + :rtype: openstack.instance_ha.v1._proxy.Proxy + """ + return aodh_client.Client(session=session) + + +def get_manila_session_client(session, version='2'): + """Return Manila client authenticated by keystone session. + + :param session: Keystone session object + :type session: keystoneauth1.session.Session object + :param version: Manila API version + :type version: str + :returns: Authenticated manilaclient + :rtype: manilaclient.Client + """ + return manilaclient.Client(session=session, client_version=version) + + +def get_keystone_scope(model_name=None): """Return Keystone scope based on OpenStack release of the overcloud. + :param model_name: Name of model to query. + :type model_name: str :returns: String keystone scope :rtype: string """ - os_version = get_current_os_versions("keystone")["keystone"] + os_version = get_current_os_versions("keystone", + model_name=model_name)["keystone"] # Keystone policy.json shipped the charm with liberty requires a domain # scoped token. Bug #1649106 if os_version == "liberty": @@ -307,7 +485,7 @@ def get_keystone_scope(): def get_keystone_session(openrc_creds, scope='PROJECT', verify=None): """Return keystone session. - :param openrc_creds: Openstack RC credentials + :param openrc_creds: OpenStack RC credentials :type openrc_creds: dict :param verify: Control TLS certificate verification behaviour :type verify: any (True - use system certs, @@ -329,17 +507,20 @@ def get_keystone_session(openrc_creds, scope='PROJECT', verify=None): return session.Session(auth=auth, verify=verify) -def get_overcloud_keystone_session(verify=None): +def get_overcloud_keystone_session(verify=None, model_name=None): """Return Over cloud keystone session. :param verify: Control TLS certificate verification behaviour :type verify: any + :param model_name: Name of model to query. + :type model_name: str :returns keystone_session: keystoneauth1.session.Session object :rtype: keystoneauth1.session.Session """ - return get_keystone_session(get_overcloud_auth(), - scope=get_keystone_scope(), - verify=verify) + return get_keystone_session( + get_overcloud_auth(model_name=model_name), + scope=get_keystone_scope(model_name=model_name), + verify=verify) def get_undercloud_keystone_session(verify=None): @@ -373,7 +554,7 @@ def get_keystone_session_client(session, client_api_version=3): def get_keystone_client(openrc_creds, verify=None): """Return authenticated keystoneclient and set auth_ref for service_catalog. - :param openrc_creds: Openstack RC credentials + :param openrc_creds: OpenStack RC credentials :type openrc_creds: dict :param verify: Control TLS certificate verification behaviour :type verify: any @@ -416,12 +597,28 @@ def get_project_id(ks_client, project_name, api_version=2, domain_name=None): return None +def get_domain_id(ks_client, domain_name): + """Return domain ID. + + :param ks_client: Authenticated keystoneclient + :type ks_client: keystoneclient.v3.Client object + :param domain_name: Name of the domain + :type domain_name: string + :returns: Domain ID + :rtype: string or None + """ + all_domains = ks_client.domains.list(name=domain_name) + if all_domains: + return all_domains[0].id + return None + + # Neutron Helpers def get_gateway_uuids(): """Return machine uuids for neutron-gateway(s). :returns: List of uuids - :rtype: list + :rtype: Iterator[str] """ return juju_utils.get_machine_uuids_for_application('neutron-gateway') @@ -430,28 +627,78 @@ def get_ovs_uuids(): """Return machine uuids for neutron-openvswitch(s). :returns: List of uuids - :rtype: list + :rtype: Iterator[str] """ - return (juju_utils - .get_machine_uuids_for_application('neutron-openvswitch')) + return juju_utils.get_machine_uuids_for_application('neutron-openvswitch') + + +def get_ovn_uuids(): + """Provide machine uuids for OVN Chassis. + + :returns: List of uuids + :rtype: Iterator[str] + """ + return itertools.chain( + juju_utils.get_machine_uuids_for_application('ovn-chassis'), + juju_utils.get_machine_uuids_for_application('ovn-dedicated-chassis'), + ) + + +def dvr_enabled(): + """Check whether DVR is enabled in deployment. + + :returns: True when DVR is enabled, False otherwise + :rtype: bool + """ + return get_application_config_option('neutron-api', 'enable-dvr') + + +def ngw_present(): + """Check whether Neutron Gateway is present in deployment. + + :returns: True when Neutron Gateway is present, False otherwise + :rtype: bool + """ + try: + model.get_application('neutron-gateway') + return True + except KeyError: + pass + return False + + +def ovn_present(): + """Check whether OVN is present in deployment. + + :returns: True when OVN is present, False otherwise + :rtype: bool + """ + app_presence = [] + for name in ('ovn-chassis', 'ovn-dedicated-chassis'): + try: + model.get_application(name) + app_presence.append(True) + except KeyError: + app_presence.append(False) + return any(app_presence) BRIDGE_MAPPINGS = 'bridge-mappings' NEW_STYLE_NETWORKING = 'physnet1:br-ex' -def deprecated_external_networking(dvr_mode=False): +def deprecated_external_networking(): """Determine whether deprecated external network mode is in use. - :param dvr_mode: Using DVR mode or not - :type dvr_mode: boolean :returns: True or False :rtype: boolean """ bridge_mappings = None - if dvr_mode: + if dvr_enabled(): bridge_mappings = get_application_config_option('neutron-openvswitch', BRIDGE_MAPPINGS) + elif ovn_present(): + return False else: bridge_mappings = get_application_config_option('neutron-gateway', BRIDGE_MAPPINGS) @@ -488,43 +735,201 @@ def get_admin_net(neutron_client): return net -def configure_gateway_ext_port(novaclient, neutronclient, - dvr_mode=None, net_id=None): - """Configure the neturong-gateway external port. +def add_interface_to_netplan(server_name, mac_address): + """In guest server_name, add nic with mac_address to netplan. - :param novaclient: Authenticated novaclient - :type novaclient: novaclient.Client object - :param neutronclient: Authenticated neutronclient - :type neutronclient: neutronclient.Client object - :param dvr_mode: Using DVR mode or not - :type dvr_mode: boolean - :param net_id: Network ID - :type net_id: string + :param server_name: Hostname of instance + :type server_name: string + :param mac_address: mac address of nic to be added to netplan + :type mac_address: string """ - if dvr_mode: - uuids = get_ovs_uuids() + if dvr_enabled(): + application_names = ('neutron-openvswitch',) + elif ovn_present(): + # OVN chassis is a subordinate to nova-compute + application_names = ('nova-compute', 'ovn-dedicated-chassis') else: - uuids = get_gateway_uuids() + application_names = ('neutron-gateway',) - deprecated_extnet_mode = deprecated_external_networking(dvr_mode) + for app_name in application_names: + unit_name = juju_utils.get_unit_name_from_host_name( + server_name, app_name) + if unit_name: + break + else: + raise RuntimeError('Unable to find unit to run commands on.') + run_cmd_nic = "ip -f link -br -o addr|grep {}".format(mac_address) + interface = model.run_on_unit(unit_name, run_cmd_nic) + interface = interface['Stdout'].split(' ')[0] - config_key = 'data-port' - if deprecated_extnet_mode: - config_key = 'ext-port' + run_cmd_netplan = """sudo egrep -iR '{}|{}$' /etc/netplan/ + """.format(mac_address, interface) - if not net_id: - net_id = get_admin_net(neutronclient)['id'] + netplancfg = model.run_on_unit(unit_name, run_cmd_netplan) - for uuid in uuids: + if (mac_address in str(netplancfg)) or (interface in str(netplancfg)): + logging.warn("mac address {} or nic {} already exists in " + "/etc/netplan".format(mac_address, interface)) + return + body_value = textwrap.dedent("""\ + network: + ethernets: + {0}: + dhcp4: false + dhcp6: true + optional: true + match: + macaddress: {1} + set-name: {0} + version: 2 + """.format(interface, mac_address)) + logging.debug("plumb guest interface debug info:") + logging.debug("body_value: {}\nunit_name: {}\ninterface: {}\nmac_address:" + "{}\nserver_name: {}".format(body_value, unit_name, + interface, mac_address, + server_name)) + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(3), + wait=tenacity.wait_exponential( + multiplier=1, min=2, max=10)): + with attempt: + with tempfile.NamedTemporaryFile(mode="w") as netplan_file: + netplan_file.write(body_value) + netplan_file.flush() + model.scp_to_unit( + unit_name, netplan_file.name, + '/home/ubuntu/60-dataport.yaml', user="ubuntu") + run_cmd_mv = "sudo mv /home/ubuntu/60-dataport.yaml /etc/netplan/" + model.run_on_unit(unit_name, run_cmd_mv) + model.run_on_unit(unit_name, "sudo netplan apply") + + +class OpenStackNetworkingTopology(enum.Enum): + """OpenStack Charms Network Topologies.""" + + ML2_OVS = 'ML2+OVS' + ML2_OVS_DVR = 'ML2+OVS+DVR' + ML2_OVS_DVR_SNAT = 'ML2+OVS+DVR, no dedicated GWs' + ML2_OVN = 'ML2+OVN' + + +CharmedOpenStackNetworkingData = collections.namedtuple( + 'CharmedOpenStackNetworkingData', + [ + 'topology', + 'application_names', + 'unit_machine_ids', + 'port_config_key', + 'other_config', + ]) + + +def get_charm_networking_data(limit_gws=None): + """Inspect Juju model, determine networking topology and return data. + + :param limit_gws: Limit the number of gateways that get a port attached + :type limit_gws: Optional[int] + :rtype: CharmedOpenStackNetworkingData[ + OpenStackNetworkingTopology, + List[str], + Iterator[str], + str, + Dict[str,str]] + :returns: Named Tuple with networking data, example: + CharmedOpenStackNetworkingData( + OpenStackNetworkingTopology.ML2_OVN, + ['ovn-chassis', 'ovn-dedicated-chassis'], + ['machine-id-1', 'machine-id-2'], # generator object + 'bridge-interface-mappings', + {'ovn-bridge-mappings': 'physnet1:br-ex'}) + :raises: RuntimeError + """ + # Initialize defaults, these will be amended to fit the reality of the + # model in the checks below. + topology = OpenStackNetworkingTopology.ML2_OVS + other_config = {} + port_config_key = ( + 'data-port' if not deprecated_external_networking() else 'ext-port') + unit_machine_ids = [] + application_names = [] + + if dvr_enabled(): + if ngw_present(): + application_names = ['neutron-gateway', 'neutron-openvswitch'] + topology = OpenStackNetworkingTopology.ML2_OVS_DVR + else: + application_names = ['neutron-openvswitch'] + topology = OpenStackNetworkingTopology.ML2_OVS_DVR_SNAT + unit_machine_ids = itertools.islice( + itertools.chain( + get_ovs_uuids(), + get_gateway_uuids()), + limit_gws) + elif ngw_present(): + unit_machine_ids = itertools.islice( + get_gateway_uuids(), limit_gws) + application_names = ['neutron-gateway'] + elif ovn_present(): + topology = OpenStackNetworkingTopology.ML2_OVN + unit_machine_ids = itertools.islice(get_ovn_uuids(), limit_gws) + application_names = ['ovn-chassis'] + try: + ovn_dc_name = 'ovn-dedicated-chassis' + model.get_application(ovn_dc_name) + application_names.append(ovn_dc_name) + except KeyError: + # ovn-dedicated-chassis not in deployment + pass + port_config_key = 'bridge-interface-mappings' + other_config.update({'ovn-bridge-mappings': 'physnet1:br-ex'}) + else: + raise RuntimeError('Unable to determine charm network topology.') + + return CharmedOpenStackNetworkingData( + topology, + application_names, + unit_machine_ids, + port_config_key, + other_config) + + +def create_additional_port_for_machines(novaclient, neutronclient, net_id, + unit_machine_ids, + add_dataport_to_netplan=False): + """Create additional port for machines for use with external networking. + + :param novaclient: Undercloud Authenticated novaclient. + :type novaclient: novaclient.Client object + :param neutronclient: Undercloud Authenticated neutronclient. + :type neutronclient: neutronclient.Client object + :param net_id: Network ID to create ports on. + :type net_id: string + :param unit_machine_ids: Juju provider specific machine IDs for which we + should add ports on. + :type unit_machine_ids: Iterator[str] + :param add_dataport_to_netplan: Whether the newly created port should be + added to instance system configuration so + that it is brought up on instance reboot. + :type add_dataport_to_netplan: Optional[bool] + :returns: List of MAC addresses for created ports. + :rtype: List[str] + :raises: RuntimeError + """ + eligible_machines = 0 + for uuid in unit_machine_ids: + eligible_machines += 1 server = novaclient.servers.get(uuid) ext_port_name = "{}_ext-port".format(server.name) for port in neutronclient.list_ports(device_id=server.id)['ports']: if port['name'] == ext_port_name: - logging.warning('Neutron Gateway already has additional port') + logging.warning( + 'Instance {} already has additional port, skipping.' + .format(server.id)) break else: - logging.info('Attaching additional port to instance, ' - 'connected to net id: {}'.format(net_id)) + logging.info('Attaching additional port to instance ("{}"), ' + 'connected to net id: {}' + .format(uuid, net_id)) body_value = { "port": { "admin_state_up": True, @@ -536,32 +941,150 @@ def configure_gateway_ext_port(novaclient, neutronclient, port = neutronclient.create_port(body=body_value) server.interface_attach(port_id=port['port']['id'], net_id=None, fixed_ip=None) - ext_br_macs = [] - for port in neutronclient.list_ports(network_id=net_id)['ports']: - if 'ext-port' in port['name']: - if deprecated_extnet_mode: - ext_br_macs.append(port['mac_address']) - else: - ext_br_macs.append('br-ex:{}'.format(port['mac_address'])) - ext_br_macs.sort() - ext_br_macs_str = ' '.join(ext_br_macs) - if dvr_mode: - application_name = 'neutron-openvswitch' - else: - application_name = 'neutron-gateway' + if add_dataport_to_netplan: + mac_address = get_mac_from_port(port, neutronclient) + add_interface_to_netplan(server.name, + mac_address=mac_address) + if not eligible_machines: + # NOTE: unit_machine_ids may be an iterator so testing it for contents + # or length prior to iterating over it is futile. + raise RuntimeError('Unable to determine UUIDs for machines to attach ' + 'external networking to.') - if ext_br_macs: - logging.info('Setting {} on {} external port to {}'.format( - config_key, application_name, ext_br_macs_str)) - current_data_port = get_application_config_option(application_name, - config_key) - if current_data_port == ext_br_macs_str: + # Retrieve the just created ports from Neutron so that we can provide our + # caller with their MAC addresses. + return [ + port['mac_address'] + for port in neutronclient.list_ports(network_id=net_id)['ports'] + if 'ext-port' in port['name'] + ] + + +def configure_networking_charms(networking_data, macs, use_juju_wait=True): + """Configure external networking for networking charms. + + :param networking_data: Data on networking charm topology. + :type networking_data: CharmedOpenStackNetworkingData + :param macs: MAC addresses of ports for use with external networking. + :type macs: Iterator[str] + :param use_juju_wait: Whether to use juju wait to wait for the model to + settle once the gateway has been configured. Default is True + :type use_juju_wait: Optional[bool] + """ + br_mac_fmt = 'br-ex:{}' if not deprecated_external_networking() else '{}' + br_mac = [ + br_mac_fmt.format(mac) + for mac in macs + ] + + config = copy.deepcopy(networking_data.other_config) + config.update({networking_data.port_config_key: ' '.join(sorted(br_mac))}) + + for application_name in networking_data.application_names: + logging.info('Setting {} on {}'.format( + config, application_name)) + current_data_port = get_application_config_option( + application_name, + networking_data.port_config_key) + if current_data_port == config[networking_data.port_config_key]: logging.info('Config already set to value') return + model.set_application_config( application_name, - configuration={config_key: ext_br_macs_str}) + configuration=config) + # NOTE(fnordahl): We are stuck with juju_wait until we figure out how + # to deal with all the non ['active', 'idle', 'Unit is ready.'] + # workload/agent states and msgs that our mojo specs are exposed to. + if use_juju_wait: juju_wait.wait(wait_for_workload=True) + else: + zaza.model.wait_for_agent_status() + # TODO: shouldn't access get_charm_config() here as it relies on + # ./tests/tests.yaml existing by default (regardless of the + # fatal=False) ... it's not great design. + test_config = zaza.charm_lifecycle.utils.get_charm_config( + fatal=False) + zaza.model.wait_for_application_states( + states=test_config.get('target_deploy_status', {})) + + +def configure_gateway_ext_port(novaclient, neutronclient, net_id=None, + add_dataport_to_netplan=False, + limit_gws=None, + use_juju_wait=True): + """Configure the neturong-gateway external port. + + :param novaclient: Authenticated novaclient + :type novaclient: novaclient.Client object + :param neutronclient: Authenticated neutronclient + :type neutronclient: neutronclient.Client object + :param net_id: Network ID + :type net_id: string + :param limit_gws: Limit the number of gateways that get a port attached + :type limit_gws: Optional[int] + :param use_juju_wait: Whether to use juju wait to wait for the model to + settle once the gateway has been configured. Default is True + :type use_juju_wait: boolean + """ + networking_data = get_charm_networking_data(limit_gws=limit_gws) + if networking_data.topology in ( + OpenStackNetworkingTopology.ML2_OVS_DVR, + OpenStackNetworkingTopology.ML2_OVS_DVR_SNAT): + # If dvr, do not attempt to persist nic in netplan + # https://github.com/openstack-charmers/zaza-openstack-tests/issues/78 + add_dataport_to_netplan = False + + if not net_id: + net_id = get_admin_net(neutronclient)['id'] + + macs = create_additional_port_for_machines( + novaclient, neutronclient, net_id, networking_data.unit_machine_ids, + add_dataport_to_netplan) + + if macs: + configure_networking_charms( + networking_data, macs, use_juju_wait=use_juju_wait) + + +def configure_charmed_openstack_on_maas(network_config, limit_gws=None): + """Configure networking charms for charm-based OVS config on MAAS provider. + + :param network_config: Network configuration as provided in environment. + :type network_config: Dict[str] + :param limit_gws: Limit the number of gateways that get a port attached + :type limit_gws: Optional[int] + """ + networking_data = get_charm_networking_data(limit_gws=limit_gws) + macs = [ + mim.mac + for mim in zaza.utilities.maas.get_macs_from_cidr( + zaza.utilities.maas.get_maas_client_from_juju_cloud_data( + zaza.model.get_cloud_data()), + network_config['external_net_cidr'], + link_mode=zaza.utilities.maas.LinkMode.LINK_UP) + ] + if macs: + configure_networking_charms( + networking_data, macs, use_juju_wait=False) + + +@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60), + reraise=True, retry=tenacity.retry_if_exception_type(KeyError)) +def get_mac_from_port(port, neutronclient): + """Get mac address from port, with tenacity due to openstack async. + + :param port: neutron port + :type port: neutron port + :param neutronclient: Authenticated neutronclient + :type neutronclient: neutronclient.Client object + :returns: mac address + :rtype: string + """ + logging.info("Trying to get mac address from port:" + "{}".format(port['port']['id'])) + refresh_port = neutronclient.show_port(port['port']['id']) + return refresh_port['port']['mac_address'] def create_project_network(neutron_client, project_id, net_name='private', @@ -604,16 +1127,13 @@ def create_project_network(neutron_client, project_id, net_name='private', return network -def create_external_network(neutron_client, project_id, dvr_mode, - net_name='ext_net'): +def create_external_network(neutron_client, project_id, net_name='ext_net'): """Create the external network. :param neutron_client: Authenticated neutronclient :type neutron_client: neutronclient.Client object :param project_id: Project ID :type project_id: string - :param dvr_mode: Using DVR mode or not - :type dvr_mode: boolean :param net_name: Network name :type net_name: string :returns: Network object @@ -1175,8 +1695,23 @@ def get_swift_codename(version): :returns: Codename for swift :rtype: string """ - codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v] - return codenames[0] + return _get_special_codename(version, SWIFT_CODENAMES) + + +def get_ovn_codename(version): + """Determine OpenStack codename that corresponds to OVN version. + + :param version: Version of OVN + :type version: string + :returns: Codename for OVN + :rtype: string + """ + return _get_special_codename(version, OVN_CODENAMES) + + +def _get_special_codename(version, codenames): + found = [k for k, v in six.iteritems(codenames) if version in v] + return found[0] def get_os_code_info(package, pkg_version): @@ -1189,17 +1724,16 @@ def get_os_code_info(package, pkg_version): :returns: Codename for package :rtype: string """ - # {'code_num': entry, 'code_name': OPENSTACK_CODENAMES[entry]} # Remove epoch if it exists if ':' in pkg_version: pkg_version = pkg_version.split(':')[1:][0] if 'swift' in package: # Fully x.y.z match for swift versions - match = re.match('^(\d+)\.(\d+)\.(\d+)', pkg_version) + match = re.match(r'^(\d+)\.(\d+)\.(\d+)', pkg_version) else: # x.y match only for 20XX.X # and ignore patch level for other packages - match = re.match('^(\d+)\.(\d+)', pkg_version) + match = re.match(r'^(\d+)\.(\d+)', pkg_version) if match: vers = match.group(0) @@ -1213,15 +1747,59 @@ def get_os_code_info(package, pkg_version): # < Liberty co-ordinated project versions if 'swift' in package: return get_swift_codename(vers) + elif 'ovn' in package: + return get_ovn_codename(vers) else: return OPENSTACK_CODENAMES[vers] -def get_current_os_versions(deployed_applications): +def get_openstack_release(application, model_name=None): + """Return the openstack release codename based on /etc/openstack-release. + + This will only return a codename if the openstack-release package is + installed on the unit. + + :param application: Application name + :type application: string + :param model_name: Name of model to query. + :type model_name: str + :returns: OpenStack release codename for application + :rtype: string + """ + versions = [] + units = model.get_units(application, model_name=model_name) + for unit in units: + cmd = 'cat /etc/openstack-release | grep OPENSTACK_CODENAME' + try: + out = juju_utils.remote_run(unit.entity_id, cmd, + model_name=model_name) + except model.CommandRunFailed: + logging.debug('Fall back to version check for OpenStack codename') + else: + codename = out.split('=')[1].strip() + versions.append(codename) + if len(set(versions)) == 0: + return None + elif len(set(versions)) > 1: + raise Exception('Unexpected mix of OpenStack releases for {}: {}', + application, versions) + return versions[0] + + +def get_current_os_versions(deployed_applications, model_name=None): """Determine OpenStack codename of deployed applications. + Initially, see if the openstack-release pkg is available and use it + instead. + + If it isn't then it falls back to the existing method of checking the + version of the package passed and then resolving the version from that + using lookup tables. + :param deployed_applications: List of deployed applications :type deployed_applications: list + :param model_name: Name of model to query. + :type model_name: str :returns: List of aplication to codenames dictionaries :rtype: list """ @@ -1229,11 +1807,18 @@ def get_current_os_versions(deployed_applications): for application in UPGRADE_SERVICES: if application['name'] not in deployed_applications: continue + logging.info("looking at application: {}".format(application)) - version = generic_utils.get_pkg_version(application['name'], - application['type']['pkg']) - versions[application['name']] = ( - get_os_code_info(application['type']['pkg'], version)) + codename = get_openstack_release(application['name'], + model_name=model_name) + if codename: + versions[application['name']] = codename + else: + version = generic_utils.get_pkg_version(application['name'], + application['type']['pkg'], + model_name=model_name) + versions[application['name']] = ( + get_os_code_info(application['type']['pkg'], version)) return versions @@ -1260,10 +1845,9 @@ def get_current_os_release_pair(application='keystone'): :raises: exceptions.SeriesNotFound :raises: exceptions.OSVersionNotFound """ - machines = juju_utils.get_machines_for_application(application) - if len(machines) >= 1: - machine = machines[0] - else: + try: + machine = list(juju_utils.get_machines_for_application(application))[0] + except IndexError: raise exceptions.ApplicationNotFound(application) series = juju_utils.get_machine_series(machine) @@ -1277,15 +1861,19 @@ def get_current_os_release_pair(application='keystone'): return '{}_{}'.format(series, os_version) -def get_os_release(release_pair=None): +def get_os_release(release_pair=None, application='keystone'): """Return index of release in OPENSTACK_RELEASES_PAIRS. + :param release_pair: OpenStack release pair eg 'focal_ussuri' + :type release_pair: string + :param application: Name of application to derive release pair from. + :type application: string :returns: Index of the release :rtype: int :raises: exceptions.ReleasePairNotFound """ if release_pair is None: - release_pair = get_current_os_release_pair() + release_pair = get_current_os_release_pair(application=application) try: index = OPENSTACK_RELEASES_PAIRS.index(release_pair) except ValueError: @@ -1297,17 +1885,21 @@ def get_os_release(release_pair=None): return index -def get_application_config_option(application, option): +def get_application_config_option(application, option, model_name=None): """Return application configuration. :param application: Name of application :type application: string :param option: Specific configuration option :type option: string + :param model_name: Name of model to query. + :type model_name: str :returns: Value of configuration option :rtype: Configuration option value type """ - application_config = model.get_application_config(application) + application_config = model.get_application_config( + application, + model_name=model_name) try: return application_config.get(option).get('value') except AttributeError: @@ -1326,7 +1918,7 @@ def get_undercloud_auth(): else: logging.error('Missing OS authentication setting: OS_AUTH_URL') raise exceptions.MissingOSAthenticationException( - 'One or more OpenStack authetication variables could ' + 'One or more OpenStack authentication variables could ' 'be found in the environment. Please export the OS_* ' 'settings into the environment.') @@ -1355,7 +1947,7 @@ def get_undercloud_auth(): 'API_VERSION': 3, } if domain: - auth_settings['OS_DOMAIN_NAME': 'admin_domain'] = domain + auth_settings['OS_DOMAIN_NAME'] = domain else: auth_settings['OS_USER_DOMAIN_NAME'] = ( os.environ.get('OS_USER_DOMAIN_NAME')) @@ -1367,41 +1959,57 @@ def get_undercloud_auth(): if os_project_id is not None: auth_settings['OS_PROJECT_ID'] = os_project_id + _os_cacert = os.environ.get('OS_CACERT') + if _os_cacert: + auth_settings.update({'OS_CACERT': _os_cacert}) + # Validate settings for key, settings in list(auth_settings.items()): if settings is None: logging.error('Missing OS authentication setting: {}' ''.format(key)) raise exceptions.MissingOSAthenticationException( - 'One or more OpenStack authetication variables could ' + 'One or more OpenStack authentication variables could ' 'be found in the environment. Please export the OS_* ' 'settings into the environment.') return auth_settings -# Openstack Client helpers -def get_keystone_ip(): +# OpenStack Client helpers +def get_keystone_ip(model_name=None): """Return the IP address to use when communicating with keystone api. + :param model_name: Name of model to query. + :type model_name: str :returns: IP address :rtype: str """ - if get_application_config_option('keystone', 'vip'): - return get_application_config_option('keystone', 'vip') - unit = model.get_units('keystone')[0] + vip_option = get_application_config_option( + 'keystone', + 'vip', + model_name=model_name) + if vip_option: + return vip_option + unit = model.get_units('keystone', model_name=model_name)[0] return unit.public_address -def get_keystone_api_version(): +def get_keystone_api_version(model_name=None): """Return the keystone api version. + :param model_name: Name of model to query. + :type model_name: str :returns: Keystone's api version :rtype: int """ - os_version = get_current_os_versions('keystone')['keystone'] - api_version = get_application_config_option('keystone', - 'preferred-api-version') + os_version = get_current_os_versions( + 'keystone', + model_name=model_name)['keystone'] + api_version = get_application_config_option( + 'keystone', + 'preferred-api-version', + model_name=model_name) if os_version >= 'queens': api_version = 3 elif api_version is None: @@ -1410,15 +2018,21 @@ def get_keystone_api_version(): return int(api_version) -def get_overcloud_auth(address=None): +def get_overcloud_auth(address=None, model_name=None): """Get overcloud OpenStack authentication from the environment. + :param model_name: Name of model to query. + :type model_name: str :returns: Dictionary of authentication settings :rtype: dict """ tls_rid = model.get_relation_id('keystone', 'vault', + model_name=model_name, remote_interface_name='certificates') - ssl_config = get_application_config_option('keystone', 'ssl_cert') + ssl_config = get_application_config_option( + 'keystone', + 'ssl_cert', + model_name=model_name) if tls_rid or ssl_config: transport = 'https' port = 35357 @@ -1427,11 +2041,15 @@ def get_overcloud_auth(address=None): port = 5000 if not address: - address = get_keystone_ip() + address = get_keystone_ip(model_name=model_name) + address = network_utils.format_addr(address) - password = juju_utils.leader_get('keystone', 'admin_passwd') + password = juju_utils.leader_get( + 'keystone', + 'admin_passwd', + model_name=model_name) - if get_keystone_api_version() == 2: + if get_keystone_api_version(model_name=model_name) == 2: # V2 Explicitly, or None when charm does not possess the config key logging.info('Using keystone API V2 for overcloud auth') auth_settings = { @@ -1456,33 +2074,96 @@ def get_overcloud_auth(address=None): 'OS_PROJECT_DOMAIN_NAME': 'admin_domain', 'API_VERSION': 3, } - if tls_rid: - unit = model.get_first_unit_name('keystone') - model.scp_from_unit( - unit, - KEYSTONE_REMOTE_CACERT, - KEYSTONE_LOCAL_CACERT) - - if os.path.exists(KEYSTONE_LOCAL_CACERT): - os.chmod(KEYSTONE_LOCAL_CACERT, 0o644) - auth_settings['OS_CACERT'] = KEYSTONE_LOCAL_CACERT + local_ca_cert = get_remote_ca_cert_file('keystone', model_name=model_name) + if local_ca_cert: + auth_settings['OS_CACERT'] = local_ca_cert return auth_settings +async def _async_get_remote_ca_cert_file_candidates(application, + model_name=None): + """Return a list of possible remote CA file names. + + :param application: Name of application to examine. + :type application: str + :param model_name: Name of model to query. + :type model_name: str + :returns: List of paths to possible ca files. + :rtype: List[str] + """ + cert_files = [] + for _provider in CERT_PROVIDERS: + tls_rid = await model.async_get_relation_id( + application, + _provider, + model_name=model_name, + remote_interface_name='certificates') + if tls_rid: + cert_files.append( + REMOTE_CERT_DIR + '/' + CACERT_FILENAME_FORMAT.format( + _provider)) + cert_files.append(KEYSTONE_REMOTE_CACERT) + return cert_files + +_get_remote_ca_cert_file_candidates = zaza.model.sync_wrapper( + _async_get_remote_ca_cert_file_candidates) + + +def get_remote_ca_cert_file(application, model_name=None): + """Collect CA certificate from application. + + :param application: Name of application to collect file from. + :type application: str + :param model_name: Name of model to query. + :type model_name: str + :returns: Path to cafile + :rtype: str + """ + unit = model.get_first_unit_name(application, model_name=model_name) + local_cert_file = None + cert_files = _get_remote_ca_cert_file_candidates( + application, + model_name=model_name) + for cert_file in cert_files: + _local_cert_file = get_cacert_absolute_path( + os.path.basename(cert_file)) + with tempfile.NamedTemporaryFile(mode="w", delete=False) as _tmp_ca: + try: + model.scp_from_unit( + unit, + cert_file, + _tmp_ca.name) + except JujuError: + continue + # ensure that the path to put the local cacert in actually exists. + # The assumption that 'tests/' exists for, say, mojo is false. + # Needed due to: + # commit: 537473ad3addeaa3d1e4e2d0fd556aeaa4018eb2 + _dir = os.path.dirname(_local_cert_file) + if not os.path.exists(_dir): + os.makedirs(_dir) + shutil.move(_tmp_ca.name, _local_cert_file) + os.chmod(_local_cert_file, 0o644) + local_cert_file = _local_cert_file + break + return local_cert_file + + def get_urllib_opener(): """Create a urllib opener taking into account proxy settings. Using urllib.request.urlopen will automatically handle proxies so none of this function is needed except we are currently specifying proxies - via OS_TEST_HTTP_PROXY rather than http_proxy so a ProxyHandler is needed + via TEST_HTTP_PROXY rather than http_proxy so a ProxyHandler is needed explicitly stating the proxies. :returns: An opener which opens URLs via BaseHandlers chained together :rtype: urllib.request.OpenerDirector """ - http_proxy = os.getenv('OS_TEST_HTTP_PROXY') - logging.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy)) + deploy_env = deployment_env.get_deployment_context() + http_proxy = deploy_env.get('TEST_HTTP_PROXY') + logging.debug('TEST_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: handler = urllib.request.ProxyHandler({'http': http_proxy}) @@ -1540,7 +2221,8 @@ def download_image(image_url, target_file): def _resource_reaches_status(resource, resource_id, expected_status='available', - msg='resource'): + msg='resource', + resource_attribute='status'): """Wait for an openstack resources status to reach an expected status. Wait for an openstack resources status to reach an expected status @@ -1555,20 +2237,22 @@ def _resource_reaches_status(resource, resource_id, :param expected_status: status to expect resource to reach :type expected_status: str :param msg: text to identify purpose in logging - :type msy: str + :type msg: str + :param resource_attribute: Resource attribute to check against + :type resource_attribute: str :raises: AssertionError """ - resource_status = resource.get(resource_id).status - logging.info(resource_status) - assert resource_status == expected_status, ( - "Resource in {} state, waiting for {}" .format(resource_status, - expected_status,)) + resource_status = getattr(resource.get(resource_id), resource_attribute) + logging.info("{}: resource {} in {} state, waiting for {}".format( + msg, resource_id, resource_status, expected_status)) + assert resource_status == expected_status def resource_reaches_status(resource, resource_id, expected_status='available', msg='resource', + resource_attribute='status', wait_exponential_multiplier=1, wait_iteration_max_time=60, stop_after_attempt=8, @@ -1588,6 +2272,8 @@ def resource_reaches_status(resource, :type expected_status: str :param msg: text to identify purpose in logging :type msg: str + :param resource_attribute: Resource attribute to check against + :type resource_attribute: str :param wait_exponential_multiplier: Wait 2^x * wait_exponential_multiplier seconds between each retry :type wait_exponential_multiplier: int @@ -1609,7 +2295,8 @@ def resource_reaches_status(resource, resource, resource_id, expected_status, - msg) + msg, + resource_attribute) def _resource_removed(resource, resource_id, msg="resource"): @@ -1624,8 +2311,8 @@ def _resource_removed(resource, resource_id, msg="resource"): :raises: AssertionError """ matching = [r for r in resource.list() if r.id == resource_id] - logging.debug("Resource {} still present".format(resource_id)) - assert len(matching) == 0, "Resource {} still present".format(resource_id) + logging.debug("{}: resource {} still present".format(msg, resource_id)) + assert len(matching) == 0 def resource_removed(resource, @@ -1719,7 +2406,8 @@ def delete_volume_backup(cinder, vol_backup_id): def upload_image_to_glance(glance, local_path, image_name, disk_format='qcow2', - visibility='public', container_format='bare'): + visibility='public', container_format='bare', + backend=None, force_import=False): """Upload the given image to glance and apply the given label. :param glance: Authenticated glanceclient @@ -1736,6 +2424,9 @@ def upload_image_to_glance(glance, local_path, image_name, disk_format='qcow2', format that also contains metadata about the actual virtual machine. :type container_format: str + :param force_import: Force the use of glance image import + instead of direct upload + :type force_import: boolean :returns: glance image pointer :rtype: glanceclient.common.utils.RequestIdProxy """ @@ -1745,7 +2436,15 @@ def upload_image_to_glance(glance, local_path, image_name, disk_format='qcow2', disk_format=disk_format, visibility=visibility, container_format=container_format) - glance.images.upload(image.id, open(local_path, 'rb')) + + if force_import: + logging.info('Forcing image import') + glance.images.stage(image.id, open(local_path, 'rb')) + glance.images.image_import( + image.id, method='glance-direct', backend=backend) + else: + glance.images.upload( + image.id, open(local_path, 'rb'), backend=backend) resource_reaches_status( glance.images, @@ -1756,7 +2455,10 @@ def upload_image_to_glance(glance, local_path, image_name, disk_format='qcow2', return image -def create_image(glance, image_url, image_name, image_cache_dir=None, tags=[]): +def create_image(glance, image_url, image_name, image_cache_dir=None, tags=[], + properties=None, backend=None, disk_format='qcow2', + visibility='public', container_format='bare', + force_import=False): """Download the image and upload it to glance. Download an image from image_url and upload it to glance labelling @@ -1773,6 +2475,11 @@ def create_image(glance, image_url, image_name, image_cache_dir=None, tags=[]): :type image_cache_dir: Option[str, None] :param tags: Tags to add to image :type tags: list of str + :param properties: Properties and values to add to image + :type properties: dict + :param force_import: Force the use of glance image import + instead of direct upload + :type force_import: boolean :returns: glance image pointer :rtype: glanceclient.common.utils.RequestIdProxy """ @@ -1786,14 +2493,26 @@ def create_image(glance, image_url, image_name, image_cache_dir=None, tags=[]): local_path = os.path.join(image_cache_dir, img_name) if not os.path.exists(local_path): + logging.info('Downloading {} ...'.format(image_url)) download_image(image_url, local_path) + else: + logging.info('Cached image found at {} - Skipping download'.format( + local_path)) - image = upload_image_to_glance(glance, local_path, image_name) + image = upload_image_to_glance( + glance, local_path, image_name, backend=backend, + disk_format=disk_format, visibility=visibility, + container_format=container_format, force_import=force_import) for tag in tags: result = glance.image_tags.update(image.id, tag) logging.debug( 'applying tag to image: glance.image_tags.update({}, {}) = {}' .format(image.id, tags, result)) + + logging.info("Setting image properties: {}".format(properties)) + if properties: + result = glance.images.update(image.id, **properties) + return image @@ -1826,6 +2545,62 @@ def create_volume(cinder, size, name=None, image=None): return volume +def attach_volume(nova, volume_id, instance_id): + """Attach a cinder volume to a nova instance. + + :param nova: Authenticated nova client + :type nova: novaclient.v2.client.Client + :param volume_id: the id of the volume to attach + :type volume_id: str + :param instance_id: the id of the instance to attach the volume to + :type instance_id: str + :returns: nova volume pointer + :rtype: novaclient.v2.volumes.Volume + """ + logging.info( + 'Attaching volume {} to instance {}'.format( + volume_id, instance_id + ) + ) + return nova.volumes.create_server_volume(server_id=instance_id, + volume_id=volume_id, + device='/dev/vdx') + + +def failover_cinder_volume_host(cinder, backend_name='cinder-ceph', + target_backend_id='ceph', + target_status='disabled', + target_replication_status='failed-over'): + """Failover Cinder volume host with replication enabled. + + :param cinder: Authenticated cinderclient + :type cinder: cinder.Client + :param backend_name: Cinder volume backend name with + replication enabled. + :type backend_name: str + :param target_backend_id: Failover target Cinder backend id. + :type target_backend_id: str + :param target_status: Target Cinder volume status after failover. + :type target_status: str + :param target_replication_status: Target Cinder volume replication + status after failover. + :type target_replication_status: str + :raises: AssertionError + """ + host = 'cinder@{}'.format(backend_name) + logging.info('Failover Cinder volume host %s to backend_id %s', + host, target_backend_id) + cinder.services.failover_host(host=host, backend_id=target_backend_id) + for attempt in tenacity.Retrying( + retry=tenacity.retry_if_exception_type(AssertionError), + stop=tenacity.stop_after_attempt(10), + wait=tenacity.wait_exponential(multiplier=1, min=2, max=10)): + with attempt: + svc = cinder.services.list(host=host, binary='cinder-volume')[0] + assert svc.status == target_status + assert svc.replication_status == target_replication_status + + def create_volume_backup(cinder, volume_id, name=None): """Create cinder volume backup. @@ -1872,7 +2647,7 @@ def create_ssh_key(nova_client, keypair_name, replace=False): :param nova_client: Authenticated nova client :type nova_client: novaclient.v2.client.Client - :param keypair_name: Label to apply to keypair in Openstack. + :param keypair_name: Label to apply to keypair in OpenStack. :type keypair_name: str :param replace: Whether to replace the existing keypair if it already exists. @@ -1895,18 +2670,19 @@ def create_ssh_key(nova_client, keypair_name, replace=False): def get_private_key_file(keypair_name): """Location of the file containing the private key with the given label. - :param keypair_name: Label of keypair in Openstack. + :param keypair_name: Label of keypair in OpenStack. :type keypair_name: str :returns: Path to file containing key :rtype: str """ - return 'tests/id_rsa_{}'.format(keypair_name) + tmp_dir = deployment_env.get_tmpdir() + return '{}/id_rsa_{}'.format(tmp_dir, keypair_name) def write_private_key(keypair_name, key): """Store supplied private key in file. - :param keypair_name: Label of keypair in Openstack. + :param keypair_name: Label of keypair in OpenStack. :type keypair_name: str :param key: PEM Encoded Private Key :type key: str @@ -1918,7 +2694,7 @@ def write_private_key(keypair_name, key): def get_private_key(keypair_name): """Return private key. - :param keypair_name: Label of keypair in Openstack. + :param keypair_name: Label of keypair in OpenStack. :type keypair_name: str :returns: PEM Encoded Private Key :rtype: str @@ -1932,11 +2708,11 @@ def get_private_key(keypair_name): def get_public_key(nova_client, keypair_name): - """Return public key from Openstack. + """Return public key from OpenStack. :param nova_client: Authenticated nova client :type nova_client: novaclient.v2.client.Client - :param keypair_name: Label of keypair in Openstack. + :param keypair_name: Label of keypair in OpenStack. :type keypair_name: str :returns: OpenSSH Encoded Public Key :rtype: str or None @@ -1953,7 +2729,7 @@ def valid_key_exists(nova_client, keypair_name): :param nova_client: Authenticated nova client :type nova_client: novaclient.v2.client.Client - :param keypair_name: Label of keypair in Openstack. + :param keypair_name: Label of keypair in OpenStack. :type keypair_name: str """ pub_key = get_public_key(nova_client, keypair_name) @@ -1980,8 +2756,8 @@ def get_ports_from_device_id(neutron_client, device_id): return ports -@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60), - reraise=True, stop=tenacity.stop_after_attempt(8)) +@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=120), + reraise=True, stop=tenacity.stop_after_delay(1800)) def cloud_init_complete(nova_client, vm_id, bootstring): """Wait for cloud init to complete on the given vm. @@ -2000,11 +2776,13 @@ def cloud_init_complete(nova_client, vm_id, bootstring): instance = nova_client.servers.find(id=vm_id) console_log = instance.get_console_output() if bootstring not in console_log: - raise exceptions.CloudInitIncomplete() + raise exceptions.CloudInitIncomplete( + "'{}' not found in console log: {}" + .format(bootstring, console_log)) @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60), - reraise=True, stop=tenacity.stop_after_attempt(8)) + reraise=True, stop=tenacity.stop_after_attempt(16)) def ping_response(ip): """Wait for ping to respond on the given IP. @@ -2013,10 +2791,11 @@ def ping_response(ip): :raises: subprocess.CalledProcessError """ cmd = ['ping', '-c', '1', '-W', '1', ip] - subprocess.check_call(cmd, stdout=subprocess.DEVNULL) + subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + check=True) -def ssh_test(username, ip, vm_name, password=None, privkey=None): +def ssh_test(username, ip, vm_name, password=None, privkey=None, retry=True): """SSH to given ip using supplied credentials. :param username: Username to connect with @@ -2031,6 +2810,9 @@ def ssh_test(username, ip, vm_name, password=None, privkey=None): :param privkey: Private key to authenticate with. If a password is supplied it is used rather than the private key. :type privkey: str + :param retry: If True, retry a few times if an exception is raised in the + process, e.g. on connection failure. + :type retry: boolean :raises: exceptions.SSHFailed """ def verify(stdin, stdout, stderr): @@ -2044,8 +2826,18 @@ def ssh_test(username, ip, vm_name, password=None, privkey=None): vm_name)) raise exceptions.SSHFailed() - ssh_command(username, ip, vm_name, 'uname -n', - password=password, privkey=privkey, verify=verify) + # NOTE(lourot): paramiko.SSHClient().connect() calls read_all() which can + # raise an EOFError, see + # * https://docs.paramiko.org/en/stable/api/packet.html + # * https://github.com/paramiko/paramiko/issues/925 + # So retrying a few times makes sense. + for attempt in tenacity.Retrying( + stop=tenacity.stop_after_attempt(3 if retry else 1), + wait=tenacity.wait_exponential(multiplier=1, min=2, max=10), + reraise=True): + with attempt: + ssh_command(username, ip, vm_name, 'uname -n', + password=password, privkey=privkey, verify=verify) def ssh_command(username, @@ -2082,7 +2874,7 @@ def ssh_command(username, ssh.connect(ip, username=username, password=password) else: key = paramiko.RSAKey.from_private_key(io.StringIO(privkey)) - ssh.connect(ip, username=username, password='', pkey=key) + ssh.connect(ip, username=username, password=None, pkey=key) logging.info("Running {} on {}".format(command, vm_name)) stdin, stdout, stderr = ssh.exec_command(command) if verify and callable(verify): @@ -2239,7 +3031,8 @@ def get_keystone_session_from_relation(client_app, identity_app='keystone', relation_name='identity-service', scope='PROJECT', - verify=None): + verify=None, + model_name=None): """Extract credentials information from a relation & return a session. :param client_app: Name of application receiving credentials. @@ -2255,16 +3048,19 @@ def get_keystone_session_from_relation(client_app, False - do not verify, None - defer to requests library to find certs, str - path to a CA cert bundle) + :param model_name: Name of model to query. + :type model_name: str :returns: Keystone session object :rtype: keystoneauth1.session.Session object """ relation = juju_utils.get_relation_from_unit( client_app, identity_app, - relation_name) + relation_name, + model_name=model_name) api_version = int(relation.get('api_version', 2)) - creds = get_overcloud_auth() + creds = get_overcloud_auth(model_name=model_name) creds['OS_USERNAME'] = relation['service_username'] creds['OS_PASSWORD'] = relation['service_password'] creds['OS_PROJECT_NAME'] = relation['service_tenant'] diff --git a/zaza/openstack/utilities/openstack_upgrade.py b/zaza/openstack/utilities/openstack_upgrade.py new file mode 100755 index 0000000..c279e58 --- /dev/null +++ b/zaza/openstack/utilities/openstack_upgrade.py @@ -0,0 +1,343 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for performing OpenStack upgrades. + +This module contains a number of functions for upgrading OpenStack. +""" +import logging +import zaza.openstack.utilities.juju as juju_utils + +import zaza.model +from zaza import sync_wrapper +from zaza.openstack.utilities.upgrade_utils import ( + get_upgrade_groups, +) + + +async def async_pause_units(units, model_name=None): + """Pause all units in unit list. + + Pause all units in unit list. Wait for pause action + to complete. + + :param units: List of unit names. + :type units: [] + :param model_name: Name of model to query. + :type model_name: str + :rtype: juju.action.Action + :raises: zaza.model.ActionFailed + """ + logging.info("Pausing {}".format(', '.join(units))) + await zaza.model.async_run_action_on_units( + units, + 'pause', + model_name=model_name, + raise_on_failure=True) + +pause_units = sync_wrapper(async_pause_units) + + +async def async_resume_units(units, model_name=None): + """Resume all units in unit list. + + Resume all units in unit list. Wait for resume action + to complete. + + :param units: List of unit names. + :type units: [] + :param model_name: Name of model to query. + :type model_name: str + :rtype: juju.action.Action + :raises: zaza.model.ActionFailed + """ + logging.info("Resuming {}".format(', '.join(units))) + await zaza.model.async_run_action_on_units( + units, + 'resume', + model_name=model_name, + raise_on_failure=True) + +resume_units = sync_wrapper(async_resume_units) + + +async def async_action_unit_upgrade(units, model_name=None): + """Run openstack-upgrade on all units in unit list. + + Upgrade payload on all units in unit list. Wait for action + to complete. + + :param units: List of unit names. + :type units: [] + :param model_name: Name of model to query. + :type model_name: str + :rtype: juju.action.Action + :raises: zaza.model.ActionFailed + """ + logging.info("Upgrading {}".format(', '.join(units))) + await zaza.model.async_run_action_on_units( + units, + 'openstack-upgrade', + model_name=model_name, + raise_on_failure=True) + +action_unit_upgrade = sync_wrapper(async_action_unit_upgrade) + + +def action_upgrade_apps(applications, model_name=None): + """Upgrade units in the applications using action managed upgrades. + + Upgrade all units of the given applications using action managed upgrades. + This involves the following process: + 1) Take a unit from each application which has not been upgraded yet. + 2) Pause all hacluster units assocaiated with units to be upgraded. + 3) Pause target units. + 4) Upgrade target units. + 5) Resume target units. + 6) Resume hacluster units paused in step 2. + 7) Repeat until all units are upgraded. + + :param applications: List of application names. + :type applications: [] + :param model_name: Name of model to query. + :type model_name: str + """ + status = zaza.model.get_status(model_name=model_name) + done = [] + while True: + target = [] + for app in applications: + for unit in zaza.model.get_units(app, model_name=model_name): + if unit.entity_id not in done: + target.append(unit.entity_id) + break + else: + logging.info("All units of {} upgraded".format(app)) + if not target: + break + hacluster_units = juju_utils.get_subordinate_units( + target, + 'hacluster', + status=status, + model_name=model_name) + + pause_units(hacluster_units, model_name=model_name) + pause_units(target, model_name=model_name) + + action_unit_upgrade(target, model_name=model_name) + + resume_units(target, model_name=model_name) + resume_units(hacluster_units, model_name=model_name) + + done.extend(target) + + # Ensure that mysql-innodb-cluster has at least one R/W group (it can get + # into a state where all are R/O whilst it is sorting itself out after an + # openstack_upgrade + if "mysql-innodb-cluster" in applications: + block_until_mysql_innodb_cluster_has_rw(model_name) + + # Now we need to wait for the model to go back to idle. + zaza.model.block_until_all_units_idle(model_name) + + +async def async_block_until_mysql_innodb_cluster_has_rw(model=None, + timeout=None): + """Block until the mysql-innodb-cluster is in a healthy state. + + Curiously, after a series of pauses and restarts (e.g. during an upgrade) + the mysql-innodb-cluster charms may not yet have agreed which one is the + R/W node; i.e. they are all R/O. Anyway, eventually they sort it out and + one jumps to the front and says "it's me!". This is detected, externally, + by the status line including R/W in the output. + + This function blocks until that happens so that no charm attempts to have a + chat with the mysql server before it has settled, thus breaking the whole + test. + """ + async def async_check_workload_messages_for_rw(model=None): + """Return True if a least one work message contains R/W.""" + status = await zaza.model.async_get_status() + app_status = status.applications.get("mysql-innodb-cluster") + units_data = app_status.units.values() + workload_statuses = [d.workload_status.info for d in units_data] + return any("R/W" in s for s in workload_statuses) + + await zaza.model.async_block_until(async_check_workload_messages_for_rw, + timeout=timeout) + + +block_until_mysql_innodb_cluster_has_rw = sync_wrapper( + async_block_until_mysql_innodb_cluster_has_rw) + + +def set_upgrade_application_config(applications, new_source, + action_managed=True, model_name=None): + """Set the charm config for upgrade. + + Set the charm config for upgrade. + + :param applications: List of application names. + :type applications: List[str] + :param new_source: New package origin. + :type new_source: str + :param action_managed: Whether to set action-managed-upgrade config option. + :type action_managed: bool + :param model_name: Name of model to query. + :type model_name: str + """ + for app in applications: + src_option = 'openstack-origin' + charm_options = zaza.model.get_application_config( + app, model_name=model_name) + try: + charm_options[src_option] + except KeyError: + src_option = 'source' + config = { + src_option: new_source} + if action_managed: + config['action-managed-upgrade'] = 'True' + logging.info("Setting config for {} to {}".format(app, config)) + zaza.model.set_application_config( + app, + config, + model_name=model_name) + + +def is_action_upgradable(app, model_name=None): + """Can application be upgraded using action managed upgrade method. + + :param app: The application to check + :type app: str + :param model_name: Name of model to query. + :type model_name: str + :returns: Whether app be upgraded using action managed upgrade method. + :rtype: bool + """ + config = zaza.model.get_application_config(app, model_name=model_name) + try: + config['action-managed-upgrade'] + supported = True + except KeyError: + supported = False + return supported + + +def is_already_upgraded(app, new_src, model_name=None): + """Return True if the app has already been upgraded. + + :param app: The application to check + :type app: str + :param new_src: the new source (distro, cloud:x-y, etc.) + :type new_src: str + :param model_name: Name of model to query. + :type model_name: str + :returns: Whether app be upgraded using action managed upgrade method. + :rtype: bool + """ + config = zaza.model.get_application_config(app, model_name=model_name) + try: + src = config['openstack-origin']['value'] + key_was = 'openstack-origin' + except KeyError: + src = config['source']['value'] + key_was = 'source' + logging.info("origin for {} is {}={}".format(app, key_was, src)) + return src == new_src + + +def run_action_upgrades(apps, new_source, model_name=None): + """Upgrade payload of all applications in group using action upgrades. + + :param apps: List of applications to upgrade. + :type apps: List[str] + :param new_source: New package origin. + :type new_source: str + :param model_name: Name of model to query. + :type model_name: str + """ + set_upgrade_application_config(apps, new_source, model_name=model_name) + action_upgrade_apps(apps, model_name=model_name) + + +def run_all_in_one_upgrades(apps, new_source, model_name=None): + """Upgrade payload of all applications in group using all-in-one method. + + :param apps: List of applications to upgrade. + :type apps: List[str] + :source: New package origin. + :type new_source: str + :param model_name: Name of model to query. + :type model_name: str + """ + set_upgrade_application_config( + apps, + new_source, + model_name=model_name, + action_managed=False) + zaza.model.block_until_all_units_idle() + + +def run_upgrade_on_apps(apps, new_source, model_name=None): + """Upgrade payload of all applications in group. + + Upgrade apps using action managed upgrades where possible and fallback to + all_in_one method. + + :param apps: List of applications to upgrade. + :type apps: [] + :param new_source: New package origin. + :type new_source: str + :param model_name: Name of model to query. + :type model_name: str + """ + action_upgrades = [] + all_in_one_upgrades = [] + for app in apps: + if is_already_upgraded(app, new_source, model_name=model_name): + logging.info("Application '%s' is already upgraded. Skipping.", + app) + continue + if is_action_upgradable(app, model_name=model_name): + action_upgrades.append(app) + else: + all_in_one_upgrades.append(app) + if all_in_one_upgrades: + run_all_in_one_upgrades( + all_in_one_upgrades, + new_source, + model_name=model_name) + if action_upgrades: + run_action_upgrades( + action_upgrades, + new_source, + model_name=model_name) + + +def run_upgrade_tests(new_source, model_name=None): + """Upgrade payload of all applications in model. + + This the most basic upgrade test. It should be adapted to add/remove + elements from the environment and add tests at intermediate stages. + + :param new_source: New package origin. + :type new_source: str + :param model_name: Name of model to query. + :type model_name: str + """ + groups = get_upgrade_groups(model_name=model_name) + for name, apps in groups: + logging.info("Performing upgrade of %s", name) + run_upgrade_on_apps(apps, new_source, model_name=model_name) diff --git a/zaza/openstack/utilities/os_versions.py b/zaza/openstack/utilities/os_versions.py index cd81a84..beaedec 100644 --- a/zaza/openstack/utilities/os_versions.py +++ b/zaza/openstack/utilities/os_versions.py @@ -34,6 +34,9 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('cosmic', 'rocky'), ('disco', 'stein'), ('eoan', 'train'), + ('focal', 'ussuri'), + ('groovy', 'victoria'), + ('hirsute', 'wallaby'), ]) @@ -55,6 +58,8 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2018.2', 'rocky'), ('2019.1', 'stein'), ('2019.2', 'train'), + ('2020.1', 'ussuri'), + ('2020.2', 'victoria'), ]) OPENSTACK_RELEASES_PAIRS = [ @@ -64,9 +69,10 @@ OPENSTACK_RELEASES_PAIRS = [ 'xenial_pike', 'artful_pike', 'xenial_queens', 'bionic_queens', 'bionic_rocky', 'cosmic_rocky', 'bionic_stein', 'disco_stein', 'bionic_train', - 'eoan_train'] + 'eoan_train', 'bionic_ussuri', 'focal_ussuri', + 'focal_victoria', 'groovy_victoria', + 'focal_wallaby', 'hirsute_wallaby'] -# The ugly duckling - must list releases oldest to newest SWIFT_CODENAMES = OrderedDict([ ('diablo', ['1.4.3']), @@ -102,6 +108,21 @@ SWIFT_CODENAMES = OrderedDict([ ['2.20.0', '2.21.0']), ('train', ['2.22.0']), + ('ussuri', + ['2.24.0', '2.25.0']), + ('victoria', + ['2.25.0']), +]) + +OVN_CODENAMES = OrderedDict([ + ('train', + ['2.12']), + ('ussuri', + ['20.03']), + ('victoria', + ['20.06']), + ('wallaby', + ['20.12']), ]) # >= Liberty version->codename mapping @@ -116,6 +137,8 @@ PACKAGE_CODENAMES = { ('18', 'rocky'), ('19', 'stein'), ('20', 'train'), + ('21', 'ussuri'), + ('22', 'victoria'), ]), 'neutron-common': OrderedDict([ ('7', 'liberty'), @@ -127,6 +150,8 @@ PACKAGE_CODENAMES = { ('13', 'rocky'), ('14', 'stein'), ('15', 'train'), + ('16', 'ussuri'), + ('17', 'victoria'), ]), 'cinder-common': OrderedDict([ ('7', 'liberty'), @@ -138,6 +163,8 @@ PACKAGE_CODENAMES = { ('13', 'rocky'), ('14', 'stein'), ('15', 'train'), + ('16', 'ussuri'), + ('17', 'victoria'), ]), 'keystone': OrderedDict([ ('8', 'liberty'), @@ -149,6 +176,8 @@ PACKAGE_CODENAMES = { ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('17', 'ussuri'), + ('18', 'victoria'), ]), 'horizon-common': OrderedDict([ ('8', 'liberty'), @@ -160,6 +189,8 @@ PACKAGE_CODENAMES = { ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('18', 'ussuri'), + ('19', 'victoria'), ]), 'ceilometer-common': OrderedDict([ ('5', 'liberty'), @@ -171,6 +202,8 @@ PACKAGE_CODENAMES = { ('11', 'rocky'), ('12', 'stein'), ('13', 'train'), + ('14', 'ussuri'), + ('15', 'victoria'), ]), 'heat-common': OrderedDict([ ('5', 'liberty'), @@ -182,6 +215,8 @@ PACKAGE_CODENAMES = { ('11', 'rocky'), ('12', 'stein'), ('13', 'train'), + ('14', 'ussuri'), + ('15', 'victoria'), ]), 'glance-common': OrderedDict([ ('11', 'liberty'), @@ -193,6 +228,8 @@ PACKAGE_CODENAMES = { ('17', 'rocky'), ('18', 'stein'), ('19', 'train'), + ('20', 'ussuri'), + ('21', 'victoria'), ]), 'openstack-dashboard': OrderedDict([ ('8', 'liberty'), @@ -204,5 +241,32 @@ PACKAGE_CODENAMES = { ('14', 'rocky'), ('15', 'stein'), ('16', 'train'), + ('18', 'ussuri'), + ('19', 'victoria'), + ]), + 'designate-common': OrderedDict([ + ('1', 'liberty'), + ('2', 'mitaka'), + ('3', 'newton'), + ('4', 'ocata'), + ('5', 'pike'), + ('6', 'queens'), + ('7', 'rocky'), + ('8', 'stein'), + ('9', 'train'), + ('10', 'ussuri'), + ('11', 'victoria'), + ]), + 'ceph-common': OrderedDict([ + ('10', 'mitaka'), # jewel + ('12', 'queens'), # luminous + ('13', 'rocky'), # mimic + ('14', 'train'), # nautilus + ('15', 'ussuri'), # octopus + ]), + 'placement-common': OrderedDict([ + ('2', 'train'), + ('3', 'ussuri'), + ('4', 'victoria'), ]), } diff --git a/zaza/openstack/utilities/parallel_series_upgrade.py b/zaza/openstack/utilities/parallel_series_upgrade.py new file mode 100755 index 0000000..610496d --- /dev/null +++ b/zaza/openstack/utilities/parallel_series_upgrade.py @@ -0,0 +1,646 @@ +#!/usr/bin/env python3 +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of functions for testing series upgrade in parallel.""" + + +import asyncio + +import collections +import copy +import logging +import subprocess + +from zaza import model +from zaza.charm_lifecycle import utils as cl_utils +import zaza.openstack.utilities.generic as os_utils +import zaza.openstack.utilities.series_upgrade as series_upgrade_utils +from zaza.openstack.utilities.series_upgrade import ( + SUBORDINATE_PAUSE_RESUME_BLACKLIST, +) + + +def app_config(charm_name): + """Return a dict with the upgrade config for an application. + + :param charm_name: Name of the charm about to upgrade + :type charm_name: str + :param async: Whether the upgreade functions should be async + :type async: bool + :returns: A dicitonary of the upgrade config for the application + :rtype: Dict + """ + default = { + 'origin': 'openstack-origin', + 'pause_non_leader_subordinate': True, + 'pause_non_leader_primary': True, + 'post_upgrade_functions': [], + 'pre_upgrade_functions': [], + 'post_application_upgrade_functions': [], + 'follower_first': False, } + _app_settings = collections.defaultdict(lambda: default) + ceph = { + 'origin': "source", + 'pause_non_leader_primary': False, + 'pause_non_leader_subordinate': False, + } + exceptions = { + 'rabbitmq-server': { + # NOTE: AJK disable config-changed on rabbitmq-server due to bug: + # #1896520 + 'origin': None, + 'pause_non_leader_subordinate': False, + 'post_application_upgrade_functions': [ + ('zaza.openstack.charm_tests.rabbitmq_server.utils.' + 'complete_cluster_series_upgrade')] + }, + 'percona-cluster': { + 'origin': 'source', + 'post_application_upgrade_functions': [ + ('zaza.openstack.charm_tests.mysql.utils.' + 'complete_cluster_series_upgrade')] + }, + 'nova-compute': { + 'pause_non_leader_primary': False, + 'pause_non_leader_subordinate': False, + # TODO + # 'pre_upgrade_functions': [ + # 'zaza.openstack.charm_tests.nova_compute.setup.evacuate' + # ] + }, + 'ceph': ceph, + 'ceph-mon': ceph, + 'ceph-osd': ceph, + 'designate-bind': {'origin': None, }, + 'tempest': {'origin': None, }, + 'memcached': { + 'origin': None, + 'pause_non_leader_primary': False, + 'pause_non_leader_subordinate': False, + }, + 'vault': { + 'origin': None, + 'pause_non_leader_primary': False, + 'pause_non_leader_subordinate': True, + 'post_upgrade_functions': [ + ('zaza.openstack.charm_tests.vault.setup.' + 'async_mojo_or_default_unseal_by_unit')] + }, + 'mongodb': { + 'origin': None, + 'follower_first': True, + } + } + for key, value in exceptions.items(): + _app_settings[key] = copy.deepcopy(default) + _app_settings[key].update(value) + return _app_settings[charm_name] + + +def upgrade_ubuntu_lite(from_series='xenial', to_series='bionic'): + """Validate that we can upgrade the ubuntu-lite charm. + + :param from_series: What series are we upgrading from + :type from_series: str + :param to_series: What series are we upgrading to + :type to_series: str + """ + completed_machines = [] + asyncio.get_event_loop().run_until_complete( + parallel_series_upgrade( + 'ubuntu-lite', pause_non_leader_primary=False, + pause_non_leader_subordinate=False, + from_series=from_series, to_series=to_series, + completed_machines=completed_machines, origin=None) + ) + + +async def parallel_series_upgrade( + application, + from_series='xenial', + to_series='bionic', + origin='openstack-origin', + pause_non_leader_primary=True, + pause_non_leader_subordinate=True, + pre_upgrade_functions=None, + post_upgrade_functions=None, + post_application_upgrade_functions=None, + completed_machines=None, + follower_first=False, + files=None, + workaround_script=None +): + """Perform series upgrade on an application in parallel. + + :param unit_name: Unit Name + :type unit_name: str + :param machine_num: Machine number + :type machine_num: str + :param from_series: The series from which to upgrade + :type from_series: str + :param to_series: The series to which to upgrade + :type to_series: str + :param origin: The configuration setting variable name for changing origin + source. (openstack-origin or source) + :type origin: str + :param pause_non_leader_primary: Whether the non-leader applications should + be paused + :type pause_non_leader_primary: bool + :param pause_non_leader_subordinate: Whether the non-leader subordinate + hacluster applications should be + paused + :type pause_non_leader_subordinate: bool + + :param pre_upgrade_functions: A list of Zaza functions to call before + the upgrade is started on each machine + :type pre_upgrade_functions: List[str] + :param post_upgrade_functions: A list of Zaza functions to call when + the upgrade is complete on each machine + :type post_upgrade_functions: List[str] + :param post_application_upgrade_functions: A list of Zaza functions + to call when the upgrade is complete + on all machine in the application + :param follower_first: Should the follower(s) be upgraded first + :type follower_first: bool + :type post_application_upgrade_functions: List[str] + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :returns: None + :rtype: None + """ + if completed_machines is None: + completed_machines = [] + if follower_first: + logging.error("leader_first is ignored for parallel upgrade") + logging.info( + "About to upgrade the units of {} in parallel (follower first: {})" + .format(application, follower_first)) + + status = (await model.async_get_status()).applications[application] + logging.info( + "Configuring leader / non leaders for {}".format(application)) + leaders, non_leaders = get_leader_and_non_leaders(status) + for leader_unit in leaders.values(): + leader_machine = leader_unit["machine"] + machines = [unit["machine"] for name, unit in non_leaders.items() + if unit['machine'] not in completed_machines] + + await maybe_pause_things( + status, + non_leaders, + pause_non_leader_subordinate, + pause_non_leader_primary) + # wait for the entire application set to be idle before starting upgrades + await asyncio.gather(*[ + model.async_wait_for_unit_idle(unit, include_subordinates=True) + for unit in status["units"]]) + await prepare_series_upgrade(leader_machine, to_series=to_series) + await asyncio.gather(*[ + wait_for_idle_then_prepare_series_upgrade( + machine, to_series=to_series) + for machine in machines]) + if leader_machine not in completed_machines: + machines.append(leader_machine) + await asyncio.gather(*[ + series_upgrade_machine( + machine, + origin=origin, + application=application, + files=files, workaround_script=workaround_script, + post_upgrade_functions=post_upgrade_functions) + for machine in machines]) + completed_machines.extend(machines) + await series_upgrade_utils.async_set_series( + application, to_series=to_series) + await run_post_application_upgrade_functions( + post_application_upgrade_functions) + + +async def wait_for_idle_then_prepare_series_upgrade( + machine, to_series, model_name=None): + """Wait for the units to idle the do prepare_series_upgrade. + + We need to be sure that all the units are idle prior to actually calling + prepare_series_upgrade() as otherwise the call will fail. It has to be + checked because when the leader is paused it may kick off relation hooks in + the other units in an HA group. + + :param machine: the machine that is going to be prepared + :type machine: str + :param to_series: The series to which to upgrade + :type to_series: str + :param model_name: Name of model to query. + :type model_name: str + """ + await model.async_block_until_units_on_machine_are_idle( + machine, model_name=model_name) + await prepare_series_upgrade(machine, to_series=to_series) + + +async def serial_series_upgrade( + application, + from_series='xenial', + to_series='bionic', + origin='openstack-origin', + pause_non_leader_primary=True, + pause_non_leader_subordinate=True, + pre_upgrade_functions=None, + post_upgrade_functions=None, + post_application_upgrade_functions=None, + completed_machines=None, + follower_first=False, + files=None, + workaround_script=None +): + """Perform series upgrade on an application in serial. + + :param unit_name: Unit Name + :type unit_name: str + :param machine_num: Machine number + :type machine_num: str + :param from_series: The series from which to upgrade + :type from_series: str + :param to_series: The series to which to upgrade + :type to_series: str + :param origin: The configuration setting variable name for changing origin + source. (openstack-origin or source) + :type origin: str + :param pause_non_leader_primary: Whether the non-leader applications should + be paused + :type pause_non_leader_primary: bool + :param pause_non_leader_subordinate: Whether the non-leader subordinate + hacluster applications should be + paused + :type pause_non_leader_subordinate: bool + + :param pre_upgrade_functions: A list of Zaza functions to call before + the upgrade is started on each machine + :type pre_upgrade_functions: List[str] + :param post_upgrade_functions: A list of Zaza functions to call when + the upgrade is complete on each machine + :type post_upgrade_functions: List[str] + :param post_application_upgrade_functions: A list of Zaza functions + to call when the upgrade is complete + on all machine in the application + :param follower_first: Should the follower(s) be upgraded first + :type follower_first: bool + :type post_application_upgrade_functions: List[str] + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :returns: None + :rtype: None + """ + if completed_machines is None: + completed_machines = [] + logging.info( + "About to upgrade the units of {} in serial (follower first: {})" + .format(application, follower_first)) + status = (await model.async_get_status()).applications[application] + logging.info( + "Configuring leader / non leaders for {}".format(application)) + leader, non_leaders = get_leader_and_non_leaders(status) + for leader_name, leader_unit in leader.items(): + leader_machine = leader_unit["machine"] + leader = leader_name + + await maybe_pause_things( + status, + non_leaders, + pause_non_leader_subordinate, + pause_non_leader_primary) + logging.info("Finishing pausing application: {}".format(application)) + await series_upgrade_utils.async_set_series( + application, to_series=to_series) + logging.info("Finished set series for application: {}".format(application)) + if not follower_first and leader_machine not in completed_machines: + await model.async_wait_for_unit_idle(leader, include_subordinates=True) + await prepare_series_upgrade(leader_machine, to_series=to_series) + logging.info("About to upgrade leader of {}: {}" + .format(application, leader_machine)) + await series_upgrade_machine( + leader_machine, + origin=origin, + application=application, + files=files, workaround_script=workaround_script, + post_upgrade_functions=post_upgrade_functions) + completed_machines.append(leader_machine) + logging.info("Finished upgrading of leader for application: {}" + .format(application)) + + # for machine in machines: + for unit_name, unit in non_leaders.items(): + machine = unit['machine'] + if machine in completed_machines: + continue + await model.async_wait_for_unit_idle( + unit_name, include_subordinates=True) + await prepare_series_upgrade(machine, to_series=to_series) + logging.info("About to upgrade follower of {}: {}" + .format(application, machine)) + await series_upgrade_machine( + machine, + origin=origin, + application=application, + files=files, workaround_script=workaround_script, + post_upgrade_functions=post_upgrade_functions) + completed_machines.append(machine) + logging.info("Finished upgrading non leaders for application: {}" + .format(application)) + + if follower_first and leader_machine not in completed_machines: + await model.async_wait_for_unit_idle(leader, include_subordinates=True) + await prepare_series_upgrade(leader_machine, to_series=to_series) + logging.info("About to upgrade leader of {}: {}" + .format(application, leader_machine)) + await series_upgrade_machine( + leader_machine, + origin=origin, + application=application, + files=files, workaround_script=workaround_script, + post_upgrade_functions=post_upgrade_functions) + completed_machines.append(leader_machine) + await run_post_application_upgrade_functions( + post_application_upgrade_functions) + logging.info("Done series upgrade for: {}".format(application)) + + +async def series_upgrade_machine( + machine, + origin=None, + application=None, + post_upgrade_functions=None, + pre_upgrade_functions=None, + files=None, + workaround_script=None): + """Perform series upgrade on an machine. + + :param machine_num: Machine number + :type machine_num: str + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :param pre_upgrade_functions: A list of Zaza functions to call before + the upgrade is started on each machine + :type pre_upgrade_functions: List[str] + :param post_upgrade_functions: A list of Zaza functions to call when + the upgrade is complete on each machine + :type post_upgrade_functions: List[str] + :returns: None + :rtype: None + """ + logging.info("About to series-upgrade ({})".format(machine)) + await run_pre_upgrade_functions(machine, pre_upgrade_functions) + await add_confdef_file(machine) + await async_dist_upgrade(machine) + await async_do_release_upgrade(machine) + await remove_confdef_file(machine) + await reboot(machine) + await series_upgrade_utils.async_complete_series_upgrade(machine) + if origin: + await os_utils.async_set_origin(application, origin) + await run_post_upgrade_functions(post_upgrade_functions) + + +async def add_confdef_file(machine): + """Add the file /etc/apt/apt-conf.d/local setup to accept defaults. + + :param machine: The machine to manage + :type machine: str + :returns: None + :rtype: None + """ + create_file = ( + """echo 'DPkg::options { "--force-confdef"; "--force-confnew"; }' | """ + """sudo tee /etc/apt/apt.conf.d/local""" + ) + await model.async_run_on_machine(machine, create_file) + + +async def remove_confdef_file(machine): + """Remove the file /etc/apt/apt-conf.d/local setup to accept defaults. + + :param machine: The machine to manage + :type machine: str + :returns: None + :rtype: None + """ + await model.async_run_on_machine( + machine, + "sudo rm /etc/apt/apt.conf.d/local") + + +async def run_pre_upgrade_functions(machine, pre_upgrade_functions): + """Execute list supplied functions. + + Each of the supplied functions will be called with a single + argument of the machine that is about to be upgraded. + + :param machine: Machine that is about to be upgraded + :type machine: str + :param pre_upgrade_functions: List of awaitable functions + :type pre_upgrade_functions: [function, function, ...] + """ + if pre_upgrade_functions: + for func in pre_upgrade_functions: + logging.info("Running {}".format(func)) + m = cl_utils.get_class(func) + await m(machine) + + +async def run_post_upgrade_functions(post_upgrade_functions): + """Execute list supplied functions. + + :param post_upgrade_functions: List of awaitable functions + :type post_upgrade_functions: [function, function, ...] + """ + if post_upgrade_functions: + for func in post_upgrade_functions: + logging.info("Running {}".format(func)) + m = cl_utils.get_class(func) + await m() + + +async def run_post_application_upgrade_functions(post_upgrade_functions): + """Execute list supplied functions. + + :param post_upgrade_functions: List of awaitable functions + :type post_upgrade_functions: [function, function, ...] + """ + if post_upgrade_functions: + for func in post_upgrade_functions: + logging.info("Running {}".format(func)) + m = cl_utils.get_class(func) + await m() + + +async def maybe_pause_things( + status, units, pause_non_leader_subordinate=True, + pause_non_leader_primary=True): + """Pause the non-leaders, based on the run configuration. + + :param status: Juju status for an application + :type status: juju.applications + :param units: List of units to paybe pause + :type units: LIst[str] + :param pause_non_leader_subordinate: Should the non leader + subordinate be paused + :type pause_non_leader_subordinate: bool + :param pause_non_leader_primary: Should the non leaders be paused + :type pause_non_leader_primary: bool + :returns: Nothing + :trype: None + """ + unit_pauses = [] + for unit in units: + if pause_non_leader_subordinate: + if status["units"][unit].get("subordinates"): + for subordinate in status["units"][unit]["subordinates"]: + _app = subordinate.split('/')[0] + if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST: + logging.info("Skipping pausing {} - blacklisted" + .format(subordinate)) + else: + unit_pauses.append( + _pause_helper("subordinate", subordinate)) + if pause_non_leader_primary: + unit_pauses.append(_pause_helper("leader", unit)) + if unit_pauses: + await asyncio.gather(*unit_pauses) + + +async def _pause_helper(_type, unit): + """Pause helper to ensure that the log happens nearer to the action.""" + logging.info("Pausing ({}) {}".format(_type, unit)) + await model.async_run_action(unit, "pause", action_params={}) + logging.info("Finished Pausing ({}) {}".format(_type, unit)) + + +def get_leader_and_non_leaders(status): + """Get the leader and non-leader Juju units. + + This function returns a tuple that looks like: + + ({ + 'unit/1': juju.Unit, + }, + { + 'unit/0': juju.Unit, + 'unit/2': juju.unit, + }) + + The first entry of this tuple is the leader, and the second is + all non-leader units. + + :returns: A tuple of dicts identifying leader and non-leaders + :rtype: Dict[str, List[juju.Unit]] + """ + leader = None + non_leaders = {} + for name, unit in status["units"].items(): + if unit.get("leader"): + leader = {name: unit} + else: + non_leaders[name] = unit + return (leader, non_leaders) + + +async def prepare_series_upgrade(machine, to_series): + """Execute juju series-upgrade prepare on machine. + + NOTE: This is a new feature in juju behind a feature flag and not yet in + libjuju. + export JUJU_DEV_FEATURE_FLAGS=upgrade-series + :param machine: Machine number + :type machine: str + :param to_series: The series to which to upgrade + :type to_series: str + :returns: None + :rtype: None + """ + logging.info("Preparing series upgrade for: %s", machine) + await series_upgrade_utils.async_prepare_series_upgrade( + machine, to_series=to_series) + + +async def reboot(machine): + """Reboot the named machine. + + :param machine: Machine to reboot + :type machine: str + :returns: Nothing + :rtype: None + """ + try: + await model.async_run_on_machine(machine, 'sudo init 6 & exit') + # await run_on_machine(unit, "sudo reboot && exit") + except subprocess.CalledProcessError as error: + logging.warning("Error doing reboot: %s", error) + + +async def async_dist_upgrade(machine): + """Run dist-upgrade on unit after update package db. + + :param machine: Machine Number + :type machine: str + :returns: None + :rtype: None + """ + logging.info('Updating package db %s', machine) + update_cmd = 'sudo apt-get update' + await model.async_run_on_machine(machine, update_cmd) + + logging.info('Updating existing packages %s', machine) + dist_upgrade_cmd = ( + """yes | sudo DEBIAN_FRONTEND=noninteractive apt-get --assume-yes """ + """-o "Dpkg::Options::=--force-confdef" """ + """-o "Dpkg::Options::=--force-confold" dist-upgrade""") + await model.async_run_on_machine(machine, dist_upgrade_cmd) + rdict = await model.async_run_on_machine( + machine, + "cat /var/run/reboot-required || true") + if "Stdout" in rdict and "restart" in rdict["Stdout"].lower(): + logging.info("dist-upgrade required reboot machine: %s", machine) + await reboot(machine) + logging.info("Waiting for machine to come back afer reboot: %s", + machine) + await model.async_block_until_file_missing_on_machine( + machine, "/var/run/reboot-required") + logging.info("Waiting for machine idleness on %s", machine) + await asyncio.sleep(5.0) + await model.async_block_until_units_on_machine_are_idle(machine) + # TODO: change this to wait on units on the machine + # await model.async_block_until_all_units_idle() + + +async def async_do_release_upgrade(machine): + """Run do-release-upgrade noninteractive. + + :param machine: Machine Name + :type machine: str + :returns: None + :rtype: None + """ + logging.info('Upgrading %s', machine) + do_release_upgrade_cmd = ( + 'yes | sudo DEBIAN_FRONTEND=noninteractive ' + 'do-release-upgrade -f DistUpgradeViewNonInteractive') + + await model.async_run_on_machine( + machine, do_release_upgrade_cmd, timeout="120m") diff --git a/zaza/openstack/utilities/series_upgrade.py b/zaza/openstack/utilities/series_upgrade.py new file mode 100644 index 0000000..42683f6 --- /dev/null +++ b/zaza/openstack/utilities/series_upgrade.py @@ -0,0 +1,972 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of functions for testing series upgrade.""" + +import asyncio +import collections +import copy +import concurrent +import logging +import os +import time + +from zaza import model +from zaza.charm_lifecycle import utils as cl_utils +import zaza.openstack.utilities.generic as os_utils + + +SUBORDINATE_PAUSE_RESUME_BLACKLIST = [ + "cinder-ceph", +] + + +def app_config(charm_name, is_async=True): + """Return a dict with the upgrade config for an application. + + :param charm_name: Name of the charm about to upgrade + :type charm_name: str + :param async: Whether the upgreade functions should be async + :type async: bool + :returns: A dicitonary of the upgrade config for the application + :rtype: Dict + """ + if is_async: + default_upgrade = async_series_upgrade_application + secondary_first_upgrade = async_series_upgrade_non_leaders_first + else: + default_upgrade = series_upgrade_application + secondary_first_upgrade = series_upgrade_non_leaders_first + default = { + 'origin': 'openstack-origin', + 'pause_non_leader_subordinate': True, + 'pause_non_leader_primary': True, + 'upgrade_function': default_upgrade, + 'post_upgrade_functions': []} + _app_settings = collections.defaultdict(lambda: default) + ceph = { + 'origin': "source", + 'pause_non_leader_primary': False, + 'pause_non_leader_subordinate': False, + } + exceptions = { + 'rabbitmq-server': { + 'origin': 'source', + 'pause_non_leader_subordinate': False, }, + 'percona-cluster': {'origin': 'source', }, + 'nova-compute': { + 'pause_non_leader_primary': False, + 'pause_non_leader_subordinate': False, }, + 'ceph': ceph, + 'ceph-mon': ceph, + 'ceph-osd': ceph, + 'designate-bind': {'origin': None, }, + 'tempest': {'origin': None, }, + 'memcached': { + 'origin': None, + 'pause_non_leader_primary': False, + 'pause_non_leader_subordinate': False, + }, + 'vault': { + 'origin': None, + 'pause_non_leader_primary': False, + 'pause_non_leader_subordinate': True, + 'post_upgrade_functions': [ + ('zaza.openstack.charm_tests.vault.setup.' + 'mojo_unseal_by_unit')] + }, + 'mongodb': { + 'origin': None, + 'upgrade_function': secondary_first_upgrade, + } + } + for key, value in exceptions.items(): + _app_settings[key] = copy.deepcopy(default) + _app_settings[key].update(value) + return _app_settings[charm_name] + + +def run_post_upgrade_functions(post_upgrade_functions): + """Execute list supplied functions. + + :param post_upgrade_functions: List of functions + :type post_upgrade_functions: [function, function, ...] + """ + if post_upgrade_functions: + for func in post_upgrade_functions: + logging.info("Running {}".format(func)) + cl_utils.get_class(func)() + + +def series_upgrade_non_leaders_first( + application, from_series="trusty", + to_series="xenial", + origin='openstack-origin', + completed_machines=[], + pause_non_leader_primary=False, + pause_non_leader_subordinate=False, + files=None, + workaround_script=None, + post_upgrade_functions=None +): + """Series upgrade non leaders first. + + Wrap all the functionality to handle series upgrade for charms + which must have non leaders upgraded first. + + :param application: Name of application to upgrade series + :type application: str + :param from_series: The series from which to upgrade + :type from_series: str + :param to_series: The series to which to upgrade + :type to_series: str + :param origin: The configuration setting variable name for changing origin + source. (openstack-origin or source) + :type origin: str + :param completed_machines: List of completed machines which do no longer + require series upgrade. + :type completed_machines: list + :param pause_non_leader_primary: Whether the non-leader applications should + be paused + :type pause_non_leader_primary: bool + :param pause_non_leader_subordinate: Whether the non-leader subordinate + hacluster applications should be + paused + :type pause_non_leader_subordinate: bool + :param from_series: The series from which to upgrade + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :returns: None + :rtype: None + """ + status = model.get_status().applications[application] + leader = None + non_leaders = [] + for unit in status["units"]: + if status["units"][unit].get("leader"): + leader = unit + else: + non_leaders.append(unit) + + # Pause the non-leaders + for unit in non_leaders: + if pause_non_leader_subordinate: + if status["units"][unit].get("subordinates"): + for subordinate in status["units"][unit]["subordinates"]: + _app = subordinate.split('/')[0] + if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST: + logging.info("Skipping pausing {} - blacklisted" + .format(subordinate)) + else: + logging.info("Pausing {}".format(subordinate)) + model.run_action( + subordinate, "pause", action_params={}) + if pause_non_leader_primary: + logging.info("Pausing {}".format(unit)) + model.run_action(unit, "pause", action_params={}) + + # Series upgrade the non-leaders first + for unit in non_leaders: + machine = status["units"][unit]["machine"] + if machine not in completed_machines: + logging.info("Series upgrade non-leader unit: {}" + .format(unit)) + series_upgrade(unit, machine, + from_series=from_series, to_series=to_series, + origin=origin, + post_upgrade_functions=post_upgrade_functions) + run_post_upgrade_functions(post_upgrade_functions) + completed_machines.append(machine) + else: + logging.info("Skipping unit: {}. Machine: {} already upgraded. " + .format(unit, machine)) + model.block_until_all_units_idle() + + # Series upgrade the leader + machine = status["units"][leader]["machine"] + logging.info("Series upgrade leader: {}".format(leader)) + if machine not in completed_machines: + series_upgrade(leader, machine, + from_series=from_series, to_series=to_series, + origin=origin, + workaround_script=workaround_script, + files=files, + post_upgrade_functions=post_upgrade_functions) + completed_machines.append(machine) + else: + logging.info("Skipping unit: {}. Machine: {} already upgraded." + .format(unit, machine)) + model.block_until_all_units_idle() + + +async def async_series_upgrade_non_leaders_first( + application, + from_series="trusty", + to_series="xenial", + origin='openstack-origin', + completed_machines=[], + pause_non_leader_primary=False, + pause_non_leader_subordinate=False, + files=None, + workaround_script=None, + post_upgrade_functions=None +): + """Series upgrade non leaders first. + + Wrap all the functionality to handle series upgrade for charms + which must have non leaders upgraded first. + + :param application: Name of application to upgrade series + :type application: str + :param from_series: The series from which to upgrade + :type from_series: str + :param to_series: The series to which to upgrade + :type to_series: str + :param origin: The configuration setting variable name for changing origin + source. (openstack-origin or source) + :type origin: str + :param completed_machines: List of completed machines which do no longer + require series upgrade. + :type completed_machines: list + :param pause_non_leader_primary: Whether the non-leader applications should + be paused + :type pause_non_leader_primary: bool + :param pause_non_leader_subordinate: Whether the non-leader subordinate + hacluster applications should be + paused + :type pause_non_leader_subordinate: bool + :param from_series: The series from which to upgrade + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :returns: None + :rtype: None + """ + status = (await model.async_get_status()).applications[application] + leader = None + non_leaders = [] + for unit in status["units"]: + if status["units"][unit].get("leader"): + leader = unit + else: + non_leaders.append(unit) + + # Pause the non-leaders + for unit in non_leaders: + if pause_non_leader_subordinate: + if status["units"][unit].get("subordinates"): + for subordinate in status["units"][unit]["subordinates"]: + _app = subordinate.split('/')[0] + if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST: + logging.info("Skipping pausing {} - blacklisted" + .format(subordinate)) + else: + logging.info("Pausing {}".format(subordinate)) + await model.async_run_action( + subordinate, "pause", action_params={}) + if pause_non_leader_primary: + logging.info("Pausing {}".format(unit)) + await model.async_run_action(unit, "pause", action_params={}) + + # Series upgrade the non-leaders first + for unit in non_leaders: + machine = status["units"][unit]["machine"] + if machine not in completed_machines: + logging.info("Series upgrade non-leader unit: {}" + .format(unit)) + await async_series_upgrade( + unit, machine, + from_series=from_series, to_series=to_series, + origin=origin, + post_upgrade_functions=post_upgrade_functions) + run_post_upgrade_functions(post_upgrade_functions) + completed_machines.append(machine) + else: + logging.info("Skipping unit: {}. Machine: {} already upgraded. " + .format(unit, machine)) + await model.async_block_until_all_units_idle() + + # Series upgrade the leader + machine = status["units"][leader]["machine"] + logging.info("Series upgrade leader: {}".format(leader)) + if machine not in completed_machines: + await async_series_upgrade( + leader, machine, + from_series=from_series, to_series=to_series, + origin=origin, + workaround_script=workaround_script, + files=files, + post_upgrade_functions=post_upgrade_functions) + completed_machines.append(machine) + else: + logging.info("Skipping unit: {}. Machine: {} already upgraded." + .format(unit, machine)) + await model.async_block_until_all_units_idle() + + +def series_upgrade_application(application, pause_non_leader_primary=True, + pause_non_leader_subordinate=True, + from_series="trusty", to_series="xenial", + origin='openstack-origin', + completed_machines=[], + files=None, workaround_script=None, + post_upgrade_functions=None): + """Series upgrade application. + + Wrap all the functionality to handle series upgrade for a given + application. Including pausing non-leader units. + + :param application: Name of application to upgrade series + :type application: str + :param pause_non_leader_primary: Whether the non-leader applications should + be paused + :type pause_non_leader_primary: bool + :param pause_non_leader_subordinate: Whether the non-leader subordinate + hacluster applications should be + paused + :type pause_non_leader_subordinate: bool + :param from_series: The series from which to upgrade + :type from_series: str + :param to_series: The series to which to upgrade + :type to_series: str + :param origin: The configuration setting variable name for changing origin + source. (openstack-origin or source) + :type origin: str + :param completed_machines: List of completed machines which do no longer + require series upgrade. + :type completed_machines: list + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :returns: None + :rtype: None + """ + status = model.get_status().applications[application] + + # For some applications (percona-cluster) the leader unit must upgrade + # first. For API applications the non-leader haclusters must be paused + # before upgrade. Finally, for some applications this is arbitrary but + # generalized. + leader = None + non_leaders = [] + for unit in status["units"]: + if status["units"][unit].get("leader"): + leader = unit + else: + non_leaders.append(unit) + + # Pause the non-leaders + for unit in non_leaders: + if pause_non_leader_subordinate: + if status["units"][unit].get("subordinates"): + for subordinate in status["units"][unit]["subordinates"]: + _app = subordinate.split('/')[0] + if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST: + logging.info("Skipping pausing {} - blacklisted" + .format(subordinate)) + else: + logging.info("Pausing {}".format(subordinate)) + model.run_action( + subordinate, "pause", action_params={}) + if pause_non_leader_primary: + logging.info("Pausing {}".format(unit)) + model.run_action(unit, "pause", action_params={}) + + machine = status["units"][leader]["machine"] + # Series upgrade the leader + logging.info("Series upgrade leader: {}".format(leader)) + if machine not in completed_machines: + series_upgrade(leader, machine, + from_series=from_series, to_series=to_series, + origin=origin, workaround_script=workaround_script, + files=files, + post_upgrade_functions=post_upgrade_functions) + completed_machines.append(machine) + else: + logging.info("Skipping unit: {}. Machine: {} already upgraded." + "But setting origin on the application {}" + .format(unit, machine, application)) + logging.info("Set origin on {}".format(application)) + os_utils.set_origin(application, origin) + model.block_until_all_units_idle() + + # Series upgrade the non-leaders + for unit in non_leaders: + machine = status["units"][unit]["machine"] + if machine not in completed_machines: + logging.info("Series upgrade non-leader unit: {}" + .format(unit)) + series_upgrade(unit, machine, + from_series=from_series, to_series=to_series, + origin=origin, workaround_script=workaround_script, + files=files, + post_upgrade_functions=post_upgrade_functions) + completed_machines.append(machine) + else: + logging.info("Skipping unit: {}. Machine: {} already upgraded. " + "But setting origin on the application {}" + .format(unit, machine, application)) + logging.info("Set origin on {}".format(application)) + os_utils.set_origin(application, origin) + model.block_until_all_units_idle() + + +async def async_series_upgrade_application( + application, + pause_non_leader_primary=True, + pause_non_leader_subordinate=True, + from_series="trusty", + to_series="xenial", + origin='openstack-origin', + completed_machines=None, + files=None, workaround_script=None, + post_upgrade_functions=None, + post_application_upgrade_functions=None): + """Series upgrade application. + + Wrap all the functionality to handle series upgrade for a given + application. Including pausing non-leader units. + + :param application: Name of application to upgrade series + :type application: str + :param pause_non_leader_primary: Whether the non-leader applications should + be paused + :type pause_non_leader_primary: bool + :param pause_non_leader_subordinate: Whether the non-leader subordinate + hacluster applications should be + paused + :type pause_non_leader_subordinate: bool + :param from_series: The series from which to upgrade + :type from_series: str + :param to_series: The series to which to upgrade + :type to_series: str + :param origin: The configuration setting variable name for changing origin + source. (openstack-origin or source) + :type origin: str + :param completed_machines: List of completed machines which do no longer + require series upgrade. + :type completed_machines: list + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :param post_upgrade_functions: A list of functions to call after upgrading + each unit of an application + :type post_upgrade_functions: List[fn] + :param post_application_upgrade_functions: A list of functions to call + once after updating all units + of an application + :type post_application_upgrade_functions: List[fn] + :returns: None + :rtype: None + """ + if completed_machines is None: + completed_machines = [] + status = (await model.async_get_status()).applications[application] + + # For some applications (percona-cluster) the leader unit must upgrade + # first. For API applications the non-leader haclusters must be paused + # before upgrade. Finally, for some applications this is arbitrary but + # generalized. + leader = None + non_leaders = [] + logging.info("Configuring leader / non leaders for {}".format(application)) + for unit in status["units"]: + if status["units"][unit].get("leader"): + leader = unit + else: + non_leaders.append(unit) + + # Pause the non-leaders + for unit in non_leaders: + if pause_non_leader_subordinate: + if status["units"][unit].get("subordinates"): + for subordinate in status["units"][unit]["subordinates"]: + _app = subordinate.split('/')[0] + if _app in SUBORDINATE_PAUSE_RESUME_BLACKLIST: + logging.info("Skipping pausing {} - blacklisted" + .format(subordinate)) + else: + logging.info("Pausing {}".format(subordinate)) + await model.async_run_action( + subordinate, "pause", action_params={}) + if pause_non_leader_primary: + logging.info("Pausing {}".format(unit)) + await model.async_run_action(unit, "pause", action_params={}) + + machine = status["units"][leader]["machine"] + # Series upgrade the leader + logging.info("Series upgrade leader: {}".format(leader)) + if machine not in completed_machines: + await async_series_upgrade( + leader, machine, + from_series=from_series, + to_series=to_series, + origin=origin, + workaround_script=workaround_script, + files=files, + post_upgrade_functions=post_upgrade_functions) + completed_machines.append(machine) + else: + logging.info("Skipping unit: {}. Machine: {} already upgraded." + "But setting origin on the application {}" + .format(unit, machine, application)) + logging.info("Set origin on {}".format(application)) + await os_utils.async_set_origin(application, origin) + await wait_for_unit_idle(unit) + + # Series upgrade the non-leaders + for unit in non_leaders: + machine = status["units"][unit]["machine"] + if machine not in completed_machines: + logging.info("Series upgrade non-leader unit: {}" + .format(unit)) + await async_series_upgrade( + unit, machine, + from_series=from_series, + to_series=to_series, + origin=origin, + workaround_script=workaround_script, + files=files, + post_upgrade_functions=post_upgrade_functions) + completed_machines.append(machine) + else: + logging.info("Skipping unit: {}. Machine: {} already upgraded. " + "But setting origin on the application {}" + .format(unit, machine, application)) + logging.info("Set origin on {}".format(application)) + await os_utils.async_set_origin(application, origin) + await wait_for_unit_idle(unit) + run_post_upgrade_functions(post_application_upgrade_functions) + + +# TODO: Move these functions into zaza.model +async def wait_for_unit_idle(unit_name, timeout=600): + """Wait until the unit's agent is idle. + + :param unit_name: The unit name of the application, ex: mysql/0 + :type unit_name: str + :param timeout: How long to wait before timing out + :type timeout: int + :returns: None + :rtype: None + """ + app = unit_name.split('/')[0] + try: + await model.async_block_until( + _unit_idle(app, unit_name), + timeout=timeout) + except concurrent.futures._base.TimeoutError: + raise model.ModelTimeout("Zaza has timed out waiting on {} to " + "reach idle state.".format(unit_name)) + + +def _unit_idle(app, unit_name): + async def f(): + x = await get_agent_status(app, unit_name) + return x == "idle" + return f + + +async def get_agent_status(app, unit_name): + """Get the current status of the specified unit. + + :param app: The name of the Juju application, ex: mysql + :type app: str + :param unit_name: The unit name of the application, ex: mysql/0 + :type unit_name: str + :returns: The agent status, either active / idle, returned by Juju + :rtype: str + """ + return (await model.async_get_status()). \ + applications[app]['units'][unit_name]['agent-status']['status'] + + +def series_upgrade(unit_name, machine_num, + from_series="trusty", to_series="xenial", + origin='openstack-origin', + files=None, workaround_script=None, + post_upgrade_functions=None): + """Perform series upgrade on a unit. + + :param unit_name: Unit Name + :type unit_name: str + :param machine_num: Machine number + :type machine_num: str + :param from_series: The series from which to upgrade + :type from_series: str + :param to_series: The series to which to upgrade + :type to_series: str + :param origin: The configuration setting variable name for changing origin + source. (openstack-origin or source) + :type origin: str + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :returns: None + :rtype: None + """ + logging.info("Series upgrade {}".format(unit_name)) + application = unit_name.split('/')[0] + os_utils.set_dpkg_non_interactive_on_unit(unit_name) + dist_upgrade(unit_name) + model.block_until_all_units_idle() + logging.info("Prepare series upgrade on {}".format(machine_num)) + model.prepare_series_upgrade(machine_num, to_series=to_series) + logging.info("Waiting for workload status 'blocked' on {}" + .format(unit_name)) + model.block_until_unit_wl_status(unit_name, "blocked") + logging.info("Waiting for model idleness") + model.block_until_all_units_idle() + wrap_do_release_upgrade(unit_name, from_series=from_series, + to_series=to_series, files=files, + workaround_script=workaround_script) + logging.info("Reboot {}".format(unit_name)) + os_utils.reboot(unit_name) + logging.info("Waiting for workload status 'blocked' on {}" + .format(unit_name)) + model.block_until_unit_wl_status(unit_name, "blocked") + logging.info("Waiting for model idleness") + model.block_until_all_units_idle() + logging.info("Complete series upgrade on {}".format(machine_num)) + model.complete_series_upgrade(machine_num) + model.block_until_all_units_idle() + logging.info("Set origin on {}".format(application)) + # Allow for charms which have neither source nor openstack-origin + if origin: + os_utils.set_origin(application, origin) + model.block_until_all_units_idle() + logging.info("Running run_post_upgrade_functions {}".format( + post_upgrade_functions)) + run_post_upgrade_functions(post_upgrade_functions) + logging.info("Waiting for workload status 'active' on {}" + .format(unit_name)) + model.block_until_unit_wl_status(unit_name, "active") + model.block_until_all_units_idle() + # This step may be performed by juju in the future + logging.info("Set series on {} to {}".format(application, to_series)) + model.set_series(application, to_series) + + +async def async_series_upgrade(unit_name, machine_num, + from_series="trusty", to_series="xenial", + origin='openstack-origin', + files=None, workaround_script=None, + post_upgrade_functions=None): + """Perform series upgrade on a unit. + + :param unit_name: Unit Name + :type unit_name: str + :param machine_num: Machine number + :type machine_num: str + :param from_series: The series from which to upgrade + :type from_series: str + :param to_series: The series to which to upgrade + :type to_series: str + :param origin: The configuration setting variable name for changing origin + source. (openstack-origin or source) + :type origin: str + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :returns: None + :rtype: None + """ + logging.info("Series upgrade {}".format(unit_name)) + application = unit_name.split('/')[0] + await os_utils.async_set_dpkg_non_interactive_on_unit(unit_name) + await async_dist_upgrade(unit_name) + await wait_for_unit_idle(unit_name) + logging.info("Prepare series upgrade on {}".format(machine_num)) + await async_prepare_series_upgrade(machine_num, to_series=to_series) + logging.info("Waiting for workload status 'blocked' on {}" + .format(unit_name)) + await model.async_block_until_unit_wl_status(unit_name, "blocked") + logging.info("Waiting for unit {} idleness".format(unit_name)) + await wait_for_unit_idle(unit_name) + await async_wrap_do_release_upgrade(unit_name, from_series=from_series, + to_series=to_series, files=files, + workaround_script=workaround_script) + logging.info("Reboot {}".format(unit_name)) + await os_utils.async_reboot(unit_name) + logging.info("Waiting for workload status 'blocked' on {}" + .format(unit_name)) + await model.async_block_until_unit_wl_status(unit_name, "blocked") + # Allow for charms which have neither source nor openstack-origin + if origin: + logging.info("Set origin on {}".format(application)) + await os_utils.async_set_origin(application, origin) + await wait_for_unit_idle(unit_name) + logging.info("Complete series upgrade on {}".format(machine_num)) + await async_complete_series_upgrade(machine_num) + await wait_for_unit_idle(unit_name, timeout=1200) + logging.info("Running run_post_upgrade_functions {}".format( + post_upgrade_functions)) + run_post_upgrade_functions(post_upgrade_functions) + logging.info("Waiting for workload status 'active' on {}" + .format(unit_name)) + await model.async_block_until_unit_wl_status(unit_name, "active") + await wait_for_unit_idle(unit_name) + # This step may be performed by juju in the future + logging.info("Set series on {} to {}".format(application, to_series)) + await async_set_series(application, to_series) + + +async def async_prepare_series_upgrade(machine_num, to_series="xenial"): + """Execute juju series-upgrade prepare on machine. + + NOTE: This is a new feature in juju behind a feature flag and not yet in + libjuju. + export JUJU_DEV_FEATURE_FLAGS=upgrade-series + :param machine_num: Machine number + :type machine_num: str + :param to_series: The series to which to upgrade + :type to_series: str + :returns: None + :rtype: None + """ + juju_model = await model.async_get_juju_model() + cmd = ["juju", "upgrade-series", "-m", juju_model, + machine_num, "prepare", to_series, "--yes"] + logging.info("About to call '{}'".format(cmd)) + await os_utils.check_call(cmd) + + +async def async_complete_series_upgrade(machine_num): + """Execute juju series-upgrade complete on machine. + + NOTE: This is a new feature in juju behind a feature flag and not yet in + libjuju. + export JUJU_DEV_FEATURE_FLAGS=upgrade-series + :param machine_num: Machine number + :type machine_num: str + :returns: None + :rtype: None + """ + juju_model = await model.async_get_juju_model() + cmd = ["juju", "upgrade-series", "-m", juju_model, + machine_num, "complete"] + logging.info("About to call '{}'".format(cmd)) + await os_utils.check_call(cmd) + + +async def async_set_series(application, to_series): + """Execute juju set-series complete on application. + + NOTE: This is a new feature in juju and not yet in libjuju. + :param application: Name of application to upgrade series + :type application: str + :param to_series: The series to which to upgrade + :type to_series: str + :returns: None + :rtype: None + """ + juju_model = await model.async_get_juju_model() + cmd = ["juju", "set-series", "-m", juju_model, + application, to_series] + logging.info("About to call '{}'".format(cmd)) + await os_utils.check_call(cmd) + + +def wrap_do_release_upgrade(unit_name, from_series="trusty", + to_series="xenial", + files=None, workaround_script=None): + """Wrap do release upgrade. + + In a production environment this step would be run administratively. + For testing purposes we need this automated. + + :param unit_name: Unit Name + :type unit_name: str + :param from_series: The series from which to upgrade + :type from_series: str + :param to_series: The series to which to upgrade + :type to_series: str + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :returns: None + :rtype: None + """ + # Pre upgrade hacks + # There are a few necessary hacks to accomplish an automated upgrade + # to overcome some packaging bugs. + # Copy scripts + if files: + logging.info("SCP files") + for _file in files: + logging.info("SCP {}".format(_file)) + model.scp_to_unit(unit_name, _file, os.path.basename(_file)) + + # Run Script + if workaround_script: + logging.info("Running workaround script") + os_utils.run_via_ssh(unit_name, workaround_script) + + # Actually do the do_release_upgrade + do_release_upgrade(unit_name) + + +async def async_wrap_do_release_upgrade(unit_name, from_series="trusty", + to_series="xenial", + files=None, workaround_script=None): + """Wrap do release upgrade. + + In a production environment this step would be run administratively. + For testing purposes we need this automated. + + :param unit_name: Unit Name + :type unit_name: str + :param from_series: The series from which to upgrade + :type from_series: str + :param to_series: The series to which to upgrade + :type to_series: str + :param files: Workaround files to scp to unit under upgrade + :type files: list + :param workaround_script: Workaround script to run during series upgrade + :type workaround_script: str + :returns: None + :rtype: None + """ + # Pre upgrade hacks + # There are a few necessary hacks to accomplish an automated upgrade + # to overcome some packaging bugs. + # Copy scripts + if files: + logging.info("SCP files") + for _file in files: + logging.info("SCP {}".format(_file)) + await model.async_scp_to_unit( + unit_name, _file, os.path.basename(_file)) + + # Run Script + if workaround_script: + logging.info("Running workaround script") + await os_utils.async_run_via_ssh(unit_name, workaround_script) + + # Actually do the do_release_upgrade + await async_do_release_upgrade(unit_name) + + +def dist_upgrade(unit_name): + """Run dist-upgrade on unit after update package db. + + :param unit_name: Unit Name + :type unit_name: str + :returns: None + :rtype: None + """ + logging.info('Updating package db ' + unit_name) + update_cmd = 'sudo apt update' + model.run_on_unit(unit_name, update_cmd) + + logging.info('Updating existing packages ' + unit_name) + dist_upgrade_cmd = ( + """sudo DEBIAN_FRONTEND=noninteractive apt --assume-yes """ + """-o "Dpkg::Options::=--force-confdef" """ + """-o "Dpkg::Options::=--force-confold" dist-upgrade""") + model.run_on_unit(unit_name, dist_upgrade_cmd) + rdict = model.run_on_unit(unit_name, + "cat /var/run/reboot-required || true") + if "Stdout" in rdict and "restart" in rdict["Stdout"].lower(): + logging.info("dist-upgrade required reboot {}".format(unit_name)) + os_utils.reboot(unit_name) + logging.info("Waiting for workload status 'unknown' on {}" + .format(unit_name)) + model.block_until_unit_wl_status(unit_name, "unknown") + logging.info("Waiting for workload status to return to normal on {}" + .format(unit_name)) + model.block_until_unit_wl_status( + unit_name, "unknown", negate_match=True) + logging.info("Waiting for model idleness") + # pause for a big + time.sleep(5.0) + model.block_until_all_units_idle() + + +async def async_dist_upgrade(unit_name): + """Run dist-upgrade on unit after update package db. + + :param unit_name: Unit Name + :type unit_name: str + :returns: None + :rtype: None + """ + logging.info('Updating package db ' + unit_name) + update_cmd = 'sudo apt update' + await model.async_run_on_unit(unit_name, update_cmd) + + logging.info('Updating existing packages ' + unit_name) + dist_upgrade_cmd = ( + """sudo DEBIAN_FRONTEND=noninteractive apt --assume-yes """ + """-o "Dpkg::Options::=--force-confdef" """ + """-o "Dpkg::Options::=--force-confold" dist-upgrade""") + await model.async_run_on_unit(unit_name, dist_upgrade_cmd) + rdict = await model.async_run_on_unit( + unit_name, "cat /var/run/reboot-required || true") + if "Stdout" in rdict and "restart" in rdict["Stdout"].lower(): + logging.info("dist-upgrade required reboot {}".format(unit_name)) + await os_utils.async_reboot(unit_name) + logging.info("Waiting for workload status 'unknown' on {}" + .format(unit_name)) + await model.async_block_until_unit_wl_status(unit_name, "unknown") + logging.info("Waiting for workload status to return to normal on {}" + .format(unit_name)) + await model.async_block_until_unit_wl_status( + unit_name, "unknown", negate_match=True) + logging.info("Waiting for model idleness") + await asyncio.sleep(5.0) + await model.async_block_until_all_units_idle() + + +def do_release_upgrade(unit_name): + """Run do-release-upgrade noninteractive. + + :param unit_name: Unit Name + :type unit_name: str + :returns: None + :rtype: None + """ + logging.info('Upgrading ' + unit_name) + # NOTE: It is necessary to run this via juju ssh rather than juju run due + # to timeout restrictions and error handling. + os_utils.run_via_ssh( + unit_name, + 'DEBIAN_FRONTEND=noninteractive ' + 'do-release-upgrade -f DistUpgradeViewNonInteractive') + + +async def async_do_release_upgrade(unit_name): + """Run do-release-upgrade noninteractive. + + :param unit_name: Unit Name + :type unit_name: str + :returns: None + :rtype: None + """ + logging.info('Upgrading ' + unit_name) + # NOTE: It is necessary to run this via juju ssh rather than juju run due + # to timeout restrictions and error handling. + await os_utils.async_run_via_ssh( + unit_name, + 'DEBIAN_FRONTEND=noninteractive ' + 'do-release-upgrade -f DistUpgradeViewNonInteractive', + raise_exceptions=True) diff --git a/zaza/openstack/utilities/swift.py b/zaza/openstack/utilities/swift.py new file mode 100644 index 0000000..2b1f46a --- /dev/null +++ b/zaza/openstack/utilities/swift.py @@ -0,0 +1,300 @@ +# Copyright 2019 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Swift utilities.""" + +import logging +import uuid +import zaza.model +import zaza.openstack.utilities.juju as juju_utils + + +class ObjectReplica: + """A replica of an object. + + The replica attributes show the location of an object replica. + + server: IP address or hostname of machine hosting replica + port: Port of swift object server running on machine hosting replica + device: Path to device hosting replica + handoff_device: Whether this is a handoff devices. Handoff devices pass + the replica on to a remote storage node. + """ + + def __init__(self, raw_line): + """Extract storage info from text.""" + rl = raw_line.split() + self.server, self.port = rl[2].split(':') + self.device = rl[3] + self.handoff_device = rl[-1] == '[Handoff]' + + +class ObjectReplicas: + """Replicas of an object.""" + + def __init__(self, proxy_app, account, container_name, object_name, + storage_topology, model_name=None): + """Find all replicas of given object. + + :param proxy_app: Name of proxy application + :type proxy_app: str + :param account: Account that owns the container. + :type account: str + :param container_name: Name of container that contains the object. + :type container_name: str + :param object_name: Name of object. + :type object_name: str + :param storage_topology: Dictionary keyed on IP of storage node info. + :type storage_topology: {} + :param model_name: Model to point environment at + :type model_name: str + """ + self.replicas = [] + self.replica_placements = {} + self.storage_topology = storage_topology + raw_output = self.run_get_nodes( + proxy_app, + account, + container_name, + object_name, + model_name=model_name) + for line in self.extract_storage_lines(raw_output): + self.add_replica(line) + + def add_replica(self, storage_line): + """Add a replica to the replica set.""" + self.replicas.append(ObjectReplica(storage_line)) + + def extract_storage_lines(self, raw_output): + """Extract replica list from output of swift-get-nodes. + + :param storage_line: Output of swift-get-nodes + :type storage_line: str + :returns: List of lines relating to replicas. + :rtype: [str, ...] + """ + storage_lines = [] + for line in raw_output.split('\n'): + if line.startswith('Server:Port '): + storage_lines.append(line) + return storage_lines + + def run_get_nodes(self, proxy_app, account, container_name, object_name, + model_name=None): + """Run swift-get-nodes for an object on a proxy unit. + + :param proxy_app: Name of proxy application + :type proxy_app: str + :param account: Account that owns the container. + :type account: str + :param container_name: Name of container that contains the object. + :type container_name: str + :param object_name: Name of object. + :type object_name: str + :param model_name: Model to point environment at + :type model_name: str + :returns: Stdout of command + :rtype: str + """ + ring_file = '/etc/swift/object.ring.gz' + obj_cmd = "swift-get-nodes -a {} {} {} {}".format( + ring_file, + account, + container_name, + object_name) + cmd_result = zaza.model.run_on_leader( + proxy_app, + obj_cmd, + model_name=model_name) + return cmd_result['Stdout'] + + @property + def hand_off_ips(self): + """Replicas which are marked as handoff devices. + + These are not real replicas. They hand off the replica to other node. + + :returns: List of IPS of handoff nodes for object. + :rtype: List[str] + """ + return [r.server for r in self.replicas if r.handoff_device] + + @property + def storage_ips(self): + """Ip addresses of nodes that are housing a replica. + + :returns: List of IPS of storage nodes holding a replica of the object. + :rtype: [str, ...] + """ + return [r.server for r in self.replicas if not r.handoff_device] + + @property + def placements(self): + """Region an zone information for each replica. + + Zone info is in the form: + [{ + 'app_name': str, + 'unit': juju.Unit, + 'region': int, + 'zone': int}, ...] + + :returns: List of dicts with region and zone information. + :rtype: List[Dict[str, Union[str,int]]] + """ + return [self.storage_topology[ip] for ip in self.storage_ips] + + @property + def distinct_regions(self): + """List of distinct regions that have a replica. + + :returns: List of regions that have a replica + :rtype: [int, ...] + """ + return list(set([p['region'] for p in self.placements])) + + @property + def all_zones(self): + """List of all zones that have a replica. + + :returns: List of tuples (region, zone) that have a replica. + :rtype: List[Tuple[str, str]] + """ + return [(p['region'], p['zone']) for p in self.placements] + + @property + def distinct_zones(self): + """List of distinct region + zones that have a replica. + + :returns: List of tuples (region, zone) that have a replica. + :rtype: [(r1, z1), ...] + """ + return list(set(self.all_zones)) + + +def get_swift_storage_topology(model_name=None): + """Get details of storage nodes and which region and zones they belong in. + + :param model_name: Model to point environment at + :type model_name: str + :returns: Dictionary of storage nodes and their region/zone information. + :rtype: { + 'ip (str)': { + 'app_name': str, + 'unit': juju.Unit + 'region': int, + 'zone': int}, + ...} + """ + topology = {} + status = juju_utils.get_full_juju_status(model_name=model_name) + for app_name, app_dep_config in status.applications.items(): + if 'swift-storage' in app_dep_config['charm']: + app_config = zaza.model.get_application_config( + app_name, + model_name=model_name) + region = app_config['storage-region']['value'] + zone = app_config['zone']['value'] + for unit in zaza.model.get_units(app_name, model_name=model_name): + topology[unit.public_address] = { + 'app_name': app_name, + 'unit': unit, + 'region': region, + 'zone': zone} + return topology + + +def setup_test_container(swift_client, resource_prefix): + """Create a swift container for use be tests. + + :param swift_client: Swift client to use for object creation + :type swift_client: swiftclient.Client + :returns: (container_name, account_name) Container name and account + name for new container + :rtype: Tuple[str, str] + """ + run_id = str(uuid.uuid1()).split('-')[0] + container_name = '{}-{}-container'.format(resource_prefix, run_id) + swift_client.put_container(container_name) + resp_headers, containers = swift_client.get_account() + account = resp_headers['x-account-project-domain-id'] + return container_name, account + + +def apply_proxy_config(proxy_app, config, model_name=None): + """Update the give proxy_app with new charm config. + + :param proxy_app: Name of proxy application + :type proxy_app: str + :param config: Dictionary of configuration setting(s) to apply + :type config: dict + :param model_name: Name of model to query. + :type model_name: str + """ + current_config = zaza.model.get_application_config( + proxy_app, + model_name=model_name) + # Although there is no harm in applying config that is a noop it + # does affect the expected behaviour afterwards. So, only apply + # genuine changes so we can safely expect the charm to fire a hook. + for key, value in config.items(): + if str(config[key]) != str(current_config[key]['value']): + break + else: + logging.info( + 'Config update for {} not required.'.format(proxy_app)) + return + logging.info('Updating {} charm settings'.format(proxy_app)) + zaza.model.set_application_config( + proxy_app, + config, + model_name=model_name) + zaza.model.block_until_all_units_idle() + + +def create_object(swift_client, proxy_app, storage_topology, resource_prefix, + model_name=None): + """Create a test object in a new container. + + :param swift_client: Swift client to use for object creation + :type swift_client: swiftclient.Client + :param proxy_app: Name of proxy application + :type proxy_app: str + :param storage_topology: Dictionary keyed on IP of storage node info. + :type storage_topology: {} + :param resource_prefix: Prefix to use when naming new resources + :type resource_prefix: str + :param model_name: Model to point environment at + :type model_name: str + :returns: (container_name, object_name, object replicas) + :rtype: (str, str, ObjectReplicas) + """ + container_name, account = setup_test_container( + swift_client, + resource_prefix) + object_name = 'zaza_test_object.txt' + swift_client.put_object( + container_name, + object_name, + contents='File contents', + content_type='text/plain' + ) + obj_replicas = ObjectReplicas( + proxy_app, + account, + container_name, + object_name, + storage_topology, + model_name=model_name) + return container_name, object_name, obj_replicas diff --git a/zaza/openstack/utilities/upgrade_utils.py b/zaza/openstack/utilities/upgrade_utils.py new file mode 100644 index 0000000..4f00592 --- /dev/null +++ b/zaza/openstack/utilities/upgrade_utils.py @@ -0,0 +1,347 @@ +# Copyright 2020 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Collection of functions to support upgrade testing.""" + +import itertools +import logging +import re + +import zaza.model +import zaza.utilities.juju +from zaza.openstack.utilities.os_versions import ( + OPENSTACK_CODENAMES, + UBUNTU_OPENSTACK_RELEASE, + OPENSTACK_RELEASES_PAIRS, +) + +""" +The below upgrade order is surfaced in end-user documentation. Any change to +it should be accompanied by an update to the OpenStack Charms Deployment Guide +for both charm upgrades and payload upgrades: +- source/upgrade-charms.rst#upgrade-order +- source/upgrade-openstack.rst#openstack_upgrade_order +""" +SERVICE_GROUPS = ( + ('Database Services', ['percona-cluster', 'mysql-innodb-cluster']), + ('Stateful Services', ['rabbitmq-server', 'ceph-mon']), + ('Core Identity', ['keystone']), + ('Control Plane', [ + 'aodh', 'barbican', 'ceilometer', 'ceph-fs', + 'ceph-radosgw', 'cinder', 'designate', + 'designate-bind', 'glance', 'gnocchi', 'heat', 'manila', + 'manila-generic', 'neutron-api', 'neutron-gateway', 'placement', + 'nova-cloud-controller', 'openstack-dashboard']), + ('Data Plane', [ + 'nova-compute', 'ceph-osd', + 'swift-proxy', 'swift-storage'])) + +UPGRADE_EXCLUDE_LIST = ['rabbitmq-server', 'percona-cluster'] + + +def get_upgrade_candidates(model_name=None, filters=None): + """Extract list of apps from model that can be upgraded. + + :param model_name: Name of model to query. + :type model_name: str + :param filters: List of filter functions to apply + :type filters: List[fn] + :returns: List of application that can have their payload upgraded. + :rtype: Dict[str, Dict[str, ANY]] + """ + if filters is None: + filters = [] + status = zaza.model.get_status(model_name=model_name) + candidates = {} + for app, app_config in status.applications.items(): + if _include_app(app, app_config, filters, model_name=model_name): + candidates[app] = app_config + return candidates + + +def _include_app(app, app_config, filters, model_name=None): + for filt in filters: + if filt(app, app_config, model_name=model_name): + return False + return True + + +def _filter_subordinates(app, app_config, model_name=None): + if app_config.get("subordinate-to"): + logging.warning( + "Excluding {} from upgrade, it is a subordinate".format(app)) + return True + return False + + +def _filter_openstack_upgrade_list(app, app_config, model_name=None): + charm_name = extract_charm_name_from_url(app_config['charm']) + if app in UPGRADE_EXCLUDE_LIST or charm_name in UPGRADE_EXCLUDE_LIST: + print("Excluding {} from upgrade, on the exclude list".format(app)) + logging.warning( + "Excluding {} from upgrade, on the exclude list".format(app)) + return True + return False + + +def _filter_non_openstack_services(app, app_config, model_name=None): + charm_options = zaza.model.get_application_config( + app, model_name=model_name).keys() + src_options = ['openstack-origin', 'source'] + if not [x for x in src_options if x in charm_options]: + logging.warning( + "Excluding {} from upgrade, no src option".format(app)) + return True + return False + + +def _apply_extra_filters(filters, extra_filters): + if extra_filters: + if isinstance(extra_filters, list): + filters.extend(extra_filters) + elif callable(extra_filters): + filters.append(extra_filters) + else: + raise RuntimeError( + "extra_filters should be a list of " + "callables") + return filters + + +def _filter_easyrsa(app, app_config, model_name=None): + charm_name = extract_charm_name_from_url(app_config['charm']) + if "easyrsa" in charm_name: + logging.warn("Skipping upgrade of easyrsa Bug #1850121") + return True + return False + + +def _filter_etcd(app, app_config, model_name=None): + charm_name = extract_charm_name_from_url(app_config['charm']) + if "etcd" in charm_name: + logging.warn("Skipping upgrade of easyrsa Bug #1850124") + return True + return False + + +def _filter_memcached(app, app_config, model_name=None): + charm_name = extract_charm_name_from_url(app_config['charm']) + if "memcached" in charm_name: + logging.warn("Skipping upgrade of memcached charm") + return True + return False + + +def get_upgrade_groups(model_name=None, extra_filters=None): + """Place apps in the model into their upgrade groups. + + Place apps in the model into their upgrade groups. If an app is deployed + but is not in SERVICE_GROUPS then it is placed in a sweep_up group. + + :param model_name: Name of model to query. + :type model_name: str + :returns: Dict of group lists keyed on group name. + :rtype: collections.OrderedDict + """ + filters = [ + _filter_subordinates, + _filter_openstack_upgrade_list, + _filter_non_openstack_services, + ] + filters = _apply_extra_filters(filters, extra_filters) + apps_in_model = get_upgrade_candidates( + model_name=model_name, + filters=filters) + + return _build_service_groups(apps_in_model) + + +def get_series_upgrade_groups(model_name=None, extra_filters=None): + """Place apps in the model into their upgrade groups. + + Place apps in the model into their upgrade groups. If an app is deployed + but is not in SERVICE_GROUPS then it is placed in a sweep_up group. + + :param model_name: Name of model to query. + :type model_name: str + :returns: List of tuples(group name, applications) + :rtype: List[Tuple[str, Dict[str, ANY]]] + """ + filters = [_filter_subordinates] + filters = _apply_extra_filters(filters, extra_filters) + apps_in_model = get_upgrade_candidates( + model_name=model_name, + filters=filters) + + return _build_service_groups(apps_in_model) + + +def get_charm_upgrade_groups(model_name=None, extra_filters=None): + """Place apps in the model into their upgrade groups for a charm upgrade. + + Place apps in the model into their upgrade groups. If an app is deployed + but is not in SERVICE_GROUPS then it is placed in a sweep_up group. + + :param model_name: Name of model to query. + :type model_name: str + :returns: Dict of group lists keyed on group name. + :rtype: collections.OrderedDict + """ + filters = _apply_extra_filters([], extra_filters) + apps_in_model = get_upgrade_candidates( + model_name=model_name, + filters=filters) + + return _build_service_groups(apps_in_model) + + +def _build_service_groups(applications): + groups = [] + for phase_name, charms in SERVICE_GROUPS: + group = [] + for app, app_config in applications.items(): + charm_name = extract_charm_name_from_url(app_config['charm']) + if charm_name in charms: + group.append(app) + groups.append((phase_name, group)) + + # collect all the values into a list, and then a lookup hash + values = list(itertools.chain(*(ls for _, ls in groups))) + vhash = {v: 1 for v in values} + sweep_up = [app for app in applications if app not in vhash] + groups.append(('sweep_up', sweep_up)) + for name, group in groups: + group.sort() + return groups + + +def extract_charm_name_from_url(charm_url): + """Extract the charm name from the charm url. + + E.g. Extract 'heat' from local:bionic/heat-12 + + :param charm_url: Name of model to query. + :type charm_url: str + :returns: Charm name + :rtype: str + """ + charm_name = re.sub(r'-[0-9]+$', '', charm_url.split('/')[-1]) + return charm_name.split(':')[-1] + + +def get_all_principal_applications(model_name=None): + """Return a list of all the prinical applications in the model. + + :param model_name: Optional model name + :type model_name: Optional[str] + :returns: List of principal application names + :rtype: List[str] + """ + status = zaza.utilities.juju.get_full_juju_status(model_name=model_name) + return [application for application in status.applications.keys() + if not status.applications.get(application)['subordinate-to']] + + +def get_lowest_openstack_version(current_versions): + """Get the lowest OpenStack version from the list of current versions. + + :param current_versions: The list of versions + :type current_versions: List[str] + :returns: the lowest version currently installed. + :rtype: str + """ + lowest_version = 'zebra' + for svc in current_versions.keys(): + if current_versions[svc] < lowest_version: + lowest_version = current_versions[svc] + return lowest_version + + +def determine_next_openstack_release(release): + """Determine the next release after the one passed as a str. + + The returned value is a tuple of the form: ('2020.1', 'ussuri') + + :param release: the release to use as the base + :type release: str + :returns: the release tuple immediately after the current one. + :rtype: Tuple[str, str] + :raises: KeyError if the current release doesn't actually exist + """ + old_index = list(OPENSTACK_CODENAMES.values()).index(release) + new_index = old_index + 1 + return list(OPENSTACK_CODENAMES.items())[new_index] + + +def determine_new_source(ubuntu_version, current_source, new_release, + single_increment=True): + """Determine the new source/openstack-origin value based on new release. + + This takes the ubuntu_version and the current_source (in the form of + 'distro' or 'cloud:xenial-mitaka') and either converts it to a new source, + or returns None if the new_release will match the current_source (i.e. it's + already at the right release), or it's simply not possible. + + If single_increment is set, then the returned source will only be returned + if the new_release is one more than the release in the current source. + + :param ubuntu_version: the ubuntu version that the app is installed on. + :type ubuntu_version: str + :param current_source: a source in the form of 'distro' or + 'cloud:xenial-mitaka' + :type current_source: str + :param new_release: a new OpenStack version codename. e.g. 'stein' + :type new_release: str + :param single_increment: If True, only allow single increment upgrade. + :type single_increment: boolean + :returns: The new source in the form of 'cloud:bionic-train' or None if not + possible + :rtype: Optional[str] + :raises: KeyError if any of the strings don't correspond to known values. + """ + logging.warn("determine_new_source: locals: %s", locals()) + if current_source == 'distro': + # convert to a ubuntu-openstack pair + current_source = "cloud:{}-{}".format( + ubuntu_version, UBUNTU_OPENSTACK_RELEASE[ubuntu_version]) + # strip out the current openstack version + if ':' not in current_source: + current_source = "cloud:{}-{}".format(ubuntu_version, current_source) + pair = current_source.split(':')[1] + u_version, os_version = pair.split('-', 2) + if u_version != ubuntu_version: + logging.warn("determine_new_source: ubuntu_versions don't match: " + "%s != %s" % (ubuntu_version, u_version)) + return None + # determine versions + openstack_codenames = list(OPENSTACK_CODENAMES.values()) + old_index = openstack_codenames.index(os_version) + try: + new_os_version = openstack_codenames[old_index + 1] + except IndexError: + logging.warn("determine_new_source: no OpenStack version after " + "'%s'" % os_version) + return None + if single_increment and new_release != new_os_version: + logging.warn("determine_new_source: requested version '%s' not a " + "single increment from '%s' which is '%s'" % ( + new_release, os_version, new_os_version)) + return None + # now check that there is a combination of u_version-new_os_version + new_pair = "{}_{}".format(u_version, new_os_version) + if new_pair not in OPENSTACK_RELEASES_PAIRS: + logging.warn("determine_new_source: now release pair candidate for " + " combination cloud:%s-%s" % (u_version, new_os_version)) + return None + return "cloud:{}-{}".format(u_version, new_os_version)