Bring over network-specific utilities

Bring over only those utilities required to run network_setup.py from
openstack-mojo-specs. These utilities have been generalized and made
Python 3 compatible. Use libjuju whenever possible and pre-deprecate
anything else.

Add additional functions to the model module.
David Ames
2018-03-30 16:41:17 -07:00
parent 99710b5e83
commit 22e7ffc1e1
9 changed files with 1318 additions and 2 deletions
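
A minimal sketch of how the model helpers added here are meant to be used; the "keystone" application name and the "debug" / "preferred-api-version" option keys are placeholders for whatever happens to be deployed, not something this commit sets up.

from zaza import model
from zaza.charm_lifecycle import utils as lifecycle_utils

# Resolve the model under test the same way the lifecycle helpers do.
model_name = lifecycle_utils.get_juju_model()

# Read charm configuration through libjuju; each option maps to a dict
# holding (at least) its current 'value', as get_application_config_option
# in openstack_utils assumes.
config = model.get_application_config(model_name, 'keystone')
print(config['preferred-api-version']['value'])

# Update configuration and fetch the full status object.
model.set_application_config(
    model_name, 'keystone', configuration={'debug': 'true'})
status = model.get_status(model_name)
print(list(status.applications.keys()))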

.gitignore vendored

@@ -1,2 +1,6 @@
.tox
*.pyc
build/
dist/
.local
zaza.egg-info/


@@ -32,7 +32,7 @@ class Tox(TestCommand):
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
# import here, cause outside the eggs aren't loaded
import tox
import shlex
args = self.tox_args


@@ -4,3 +4,26 @@ PyYAML
flake8>=2.2.4,<=3.5.0
mock>=1.2
nose>=1.3.7
pbr>=1.8.0,<1.9.0
simplejson>=2.2.0
netifaces>=0.10.4
netaddr>=0.7.12,!=0.7.16
Jinja2>=2.6 # BSD License (3 clause)
six>=1.9.0
dnspython>=1.12.0
psutil>=1.1.1,<2.0.0
python-openstackclient>=3.14.0
aodhclient
python-designateclient
python-ceilometerclient
python-cinderclient
python-glanceclient
python-heatclient
python-keystoneclient
python-neutronclient
python-novaclient
python-swiftclient
distro-info
paramiko


@@ -178,6 +178,43 @@ def scp_from_unit(unit_name, model_name, source, destination, user='ubuntu',
run_in_model(model_name, scp_func, add_model_arg=True, awaitable=True))
def run_on_unit(unit, model_name, command):
"""Juju run on unit
:param unit: Unit object
:type unit: object
:param model_name: Name of model unit is in
:type model_name: str
:param command: Command to execute
:type command: str
"""
async def _run_on_unit(unit, command):
return await unit.run(command)
run_func = functools.partial(
_run_on_unit,
unit,
command)
loop.run(
run_in_model(model_name, run_func, add_model_arg=True, awaitable=True))
def get_application(model_name, application_name):
"""Return an application object
:param model_name: Name of model to query.
:type model_name: str
:param application_name: Name of application to retrieve
:type application_name: str
:returns: Application object
:rtype: object
"""
async def _get_application(application_name, model):
return model.applications[application_name]
f = functools.partial(_get_application, application_name)
return loop.run(run_in_model(model_name, f, add_model_arg=True))
def get_units(model_name, application_name):
"""Return all the units of a given application
@@ -243,6 +280,57 @@ def get_app_ips(model_name, application_name):
return [u.public_address for u in get_units(model_name, application_name)]
def get_application_config(model_name, application_name):
"""Return application configuration
:param model_name: Name of model to query.
:type model_name: str
:param application_name: Name of application
:type application_name: str
:returns: Dictionary of configuration
:rtype: dict
"""
async def _get_config(application_name, model):
return await model.applications[application_name].get_config()
f = functools.partial(_get_config, application_name)
return loop.run(run_in_model(model_name, f, add_model_arg=True))
def set_application_config(model_name, application_name, configuration):
"""Set application configuration
:param model_name: Name of model to query.
:type model_name: str
:param application_name: Name of application
:type application_name: str
:param configuration: Dictionary of configuration setting(s)
:type configuration: dict
:returns: None
:rtype: None
"""
async def _set_config(application_name, model, configuration):
return await (model.applications[application_name]
.set_config(configuration))
f = functools.partial(_set_config, application_name, configuration)
return loop.run(run_in_model(model_name, f, add_model_arg=True))
def get_status(model_name):
"""Return full status
:param model_name: Name of model to query.
:type model_name: str
:returns: dictionary of juju status
:rtype: dict
"""
async def _get_status(model):
return await model.get_status()
f = functools.partial(_get_status)
return loop.run(run_in_model(model_name, f, add_model_arg=True))
def main():
# Run the deploy coroutine in an asyncio event loop, using a helper
# that abstracts loop creation and teardown.
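
For completeness, a sketch of the unit helpers from this hunk, assuming an application named "ubuntu" exists in the model; note that run_on_unit() does not surface command output in this version (see the deprecation note on remote_run in test_utils below).

from zaza import model
from zaza.charm_lifecycle import utils as lifecycle_utils

model_name = lifecycle_utils.get_juju_model()

# Run a command on every unit of the (placeholder) application.
for unit in model.get_units(model_name, 'ubuntu'):
    model.run_on_unit(unit, model_name, 'uname -a')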


@@ -0,0 +1,3 @@
class MissingOSAthenticationException(Exception):
pass

zaza/utilities/openstack_utils.py Executable file

@@ -0,0 +1,812 @@
#!/usr/bin/env python
from .os_versions import (
OPENSTACK_CODENAMES,
SWIFT_CODENAMES,
PACKAGE_CODENAMES,
)
from keystoneclient.v2_0 import client as keystoneclient_v2
from keystoneclient.v3 import client as keystoneclient_v3
from keystoneauth1 import session
from keystoneauth1.identity import (
v3,
v2,
)
from novaclient import client as novaclient_client
from neutronclient.v2_0 import client as neutronclient
from neutronclient.common import exceptions as neutronexceptions
import logging
import os
import re
import six
import sys
import juju_wait
from zaza import model
from zaza.charm_lifecycle import utils as lifecycle_utils
from zaza.utilities import (
exceptions,
test_utils,
)
CHARM_TYPES = {
'neutron': {
'pkg': 'neutron-common',
'origin_setting': 'openstack-origin'
},
'nova': {
'pkg': 'nova-common',
'origin_setting': 'openstack-origin'
},
'glance': {
'pkg': 'glance-common',
'origin_setting': 'openstack-origin'
},
'cinder': {
'pkg': 'cinder-common',
'origin_setting': 'openstack-origin'
},
'keystone': {
'pkg': 'keystone',
'origin_setting': 'openstack-origin'
},
'openstack-dashboard': {
'pkg': 'openstack-dashboard',
'origin_setting': 'openstack-origin'
},
'ceilometer': {
'pkg': 'ceilometer-common',
'origin_setting': 'openstack-origin'
},
}
UPGRADE_SERVICES = [
{'name': 'keystone', 'type': CHARM_TYPES['keystone']},
{'name': 'nova-cloud-controller', 'type': CHARM_TYPES['nova']},
{'name': 'nova-compute', 'type': CHARM_TYPES['nova']},
{'name': 'neutron-api', 'type': CHARM_TYPES['neutron']},
{'name': 'neutron-gateway', 'type': CHARM_TYPES['neutron']},
{'name': 'glance', 'type': CHARM_TYPES['glance']},
{'name': 'cinder', 'type': CHARM_TYPES['cinder']},
{'name': 'openstack-dashboard',
'type': CHARM_TYPES['openstack-dashboard']},
{'name': 'ceilometer', 'type': CHARM_TYPES['ceilometer']},
]
# Openstack Client helpers
def get_nova_creds(cloud_creds):
auth = get_ks_creds(cloud_creds)
if cloud_creds.get('OS_PROJECT_ID'):
auth['project_id'] = cloud_creds.get('OS_PROJECT_ID')
return auth
def get_ks_creds(cloud_creds, scope='PROJECT'):
if cloud_creds.get('API_VERSION', 2) == 2:
auth = {
'username': cloud_creds['OS_USERNAME'],
'password': cloud_creds['OS_PASSWORD'],
'auth_url': cloud_creds['OS_AUTH_URL'],
'tenant_name': (cloud_creds.get('OS_PROJECT_NAME') or
cloud_creds['OS_TENANT_NAME']),
}
else:
if scope == 'DOMAIN':
auth = {
'username': cloud_creds['OS_USERNAME'],
'password': cloud_creds['OS_PASSWORD'],
'auth_url': cloud_creds['OS_AUTH_URL'],
'user_domain_name': cloud_creds['OS_USER_DOMAIN_NAME'],
'domain_name': cloud_creds['OS_DOMAIN_NAME'],
}
else:
auth = {
'username': cloud_creds['OS_USERNAME'],
'password': cloud_creds['OS_PASSWORD'],
'auth_url': cloud_creds['OS_AUTH_URL'],
'user_domain_name': cloud_creds['OS_USER_DOMAIN_NAME'],
'project_domain_name': cloud_creds['OS_PROJECT_DOMAIN_NAME'],
'project_name': cloud_creds['OS_PROJECT_NAME'],
}
return auth
def get_nova_client(novarc_creds, insecure=True):
nova_creds = get_nova_creds(novarc_creds)
nova_creds['insecure'] = insecure
nova_creds['version'] = 2
return novaclient_client.Client(**nova_creds)
def get_nova_session_client(session):
return novaclient_client.Client(2, session=session)
def get_neutron_client(novarc_creds, insecure=True):
neutron_creds = get_ks_creds(novarc_creds)
neutron_creds['insecure'] = insecure
return neutronclient.Client(**neutron_creds)
def get_neutron_session_client(session):
return neutronclient.Client(session=session)
def get_keystone_session(novarc_creds, insecure=True, scope='PROJECT'):
keystone_creds = get_ks_creds(novarc_creds, scope=scope)
if novarc_creds.get('API_VERSION', 2) == 2:
auth = v2.Password(**keystone_creds)
else:
auth = v3.Password(**keystone_creds)
return session.Session(auth=auth, verify=not insecure)
def get_keystone_session_client(session):
return keystoneclient_v3.Client(session=session)
def get_keystone_client(novarc_creds, insecure=True):
keystone_creds = get_ks_creds(novarc_creds)
if novarc_creds.get('API_VERSION', 2) == 2:
auth = v2.Password(**keystone_creds)
sess = session.Session(auth=auth, verify=True)
client = keystoneclient_v2.Client(session=sess)
else:
auth = v3.Password(**keystone_creds)
sess = get_keystone_session(novarc_creds, insecure)
client = keystoneclient_v3.Client(session=sess)
# This populates the client.service_catalog
client.auth_ref = auth.get_access(sess)
return client
def get_project_id(ks_client, project_name, api_version=2, domain_name=None):
domain_id = None
if domain_name:
domain_id = ks_client.domains.list(name=domain_name)[0].id
all_projects = ks_client.projects.list(domain=domain_id)
for t in all_projects:
if t._info['name'] == project_name:
return t._info['id']
return None
# Neutron Helpers
def get_gateway_uuids():
return test_utils.get_machine_uuids_for_application('neutron-gateway')
def get_ovs_uuids():
return test_utils.get_machine_uuids_for_application('neutron-openvswitch')
BRIDGE_MAPPINGS = 'bridge-mappings'
NEW_STYLE_NETWORKING = 'physnet1:br-ex'
def deprecated_external_networking(dvr_mode=False):
'''Determine whether deprecated external network mode is in use'''
bridge_mappings = None
if dvr_mode:
bridge_mappings = juju_get('neutron-openvswitch',
BRIDGE_MAPPINGS)
else:
bridge_mappings = juju_get('neutron-gateway',
BRIDGE_MAPPINGS)
if bridge_mappings == NEW_STYLE_NETWORKING:
return False
return True
def get_net_uuid(neutron_client, net_name):
network = neutron_client.list_networks(name=net_name)['networks'][0]
return network['id']
def get_admin_net(neutron_client):
for net in neutron_client.list_networks()['networks']:
if net['name'].endswith('_admin_net'):
return net
def configure_gateway_ext_port(novaclient, neutronclient,
dvr_mode=None, net_id=None):
if dvr_mode:
uuids = get_ovs_uuids()
else:
uuids = get_gateway_uuids()
deprecated_extnet_mode = deprecated_external_networking(dvr_mode)
config_key = 'data-port'
if deprecated_extnet_mode:
config_key = 'ext-port'
if not net_id:
net_id = get_admin_net(neutronclient)['id']
for uuid in uuids:
server = novaclient.servers.get(uuid)
ext_port_name = "{}_ext-port".format(server.name)
for port in neutronclient.list_ports(device_id=server.id)['ports']:
if port['name'] == ext_port_name:
logging.warning('Neutron Gateway already has additional port')
break
else:
logging.info('Attaching additional port to instance, '
'connected to net id: {}'.format(net_id))
body_value = {
"port": {
"admin_state_up": True,
"name": ext_port_name,
"network_id": net_id,
"port_security_enabled": False,
}
}
port = neutronclient.create_port(body=body_value)
server.interface_attach(port_id=port['port']['id'],
net_id=None, fixed_ip=None)
ext_br_macs = []
for port in neutronclient.list_ports(network_id=net_id)['ports']:
if 'ext-port' in port['name']:
if deprecated_extnet_mode:
ext_br_macs.append(port['mac_address'])
else:
ext_br_macs.append('br-ex:{}'.format(port['mac_address']))
ext_br_macs.sort()
ext_br_macs_str = ' '.join(ext_br_macs)
if dvr_mode:
application_name = 'neutron-openvswitch'
else:
application_name = 'neutron-gateway'
# XXX Trying to track down a failure with juju run neutron-gateway/0 in
# the post juju_set check. Try a sleep here to see if some network
# reconfiguring on the gateway is still in progress and that's
# causing the issue
if ext_br_macs:
logging.info('Setting {} on {} external port to {}'.format(
config_key, application_name, ext_br_macs_str))
current_data_port = juju_get(application_name, config_key)
if current_data_port == ext_br_macs_str:
logging.info('Config already set to value')
return
model.set_application_config(
lifecycle_utils.get_juju_model(), application_name,
configuration={config_key: ext_br_macs_str})
juju_wait.wait(wait_for_workload=True)
def create_project_network(neutron_client, project_id, net_name='private',
shared=False, network_type='gre', domain=None):
networks = neutron_client.list_networks(name=net_name)
if len(networks['networks']) == 0:
logging.info('Creating network: %s',
net_name)
network_msg = {
'network': {
'name': net_name,
'shared': shared,
'tenant_id': project_id,
}
}
if network_type == 'vxlan':
network_msg['network']['provider:segmentation_id'] = 1233
network_msg['network']['provider:network_type'] = network_type
network = neutron_client.create_network(network_msg)['network']
else:
logging.warning('Network %s already exists.', net_name)
network = networks['networks'][0]
return network
def create_external_network(neutron_client, project_id, dvr_mode,
net_name='ext_net'):
networks = neutron_client.list_networks(name=net_name)
if len(networks['networks']) == 0:
logging.info('Configuring external network')
network_msg = {
'name': net_name,
'router:external': True,
'tenant_id': project_id,
}
if not deprecated_external_networking(dvr_mode):
network_msg['provider:physical_network'] = 'physnet1'
network_msg['provider:network_type'] = 'flat'
logging.info('Creating new external network definition: %s',
net_name)
network = neutron_client.create_network(
{'network': network_msg})['network']
logging.info('New external network created: %s', network['id'])
else:
logging.warning('Network %s already exists.', net_name)
network = networks['networks'][0]
return network
def create_project_subnet(neutron_client, project_id, network, cidr, dhcp=True,
subnet_name='private_subnet', domain=None,
subnetpool=None, ip_version=4, prefix_len=24):
# Create subnet
subnets = neutron_client.list_subnets(name=subnet_name)
if len(subnets['subnets']) == 0:
logging.info('Creating subnet')
subnet_msg = {
'subnet': {
'name': subnet_name,
'network_id': network['id'],
'enable_dhcp': dhcp,
'ip_version': ip_version,
'tenant_id': project_id
}
}
if subnetpool:
subnet_msg['subnet']['subnetpool_id'] = subnetpool['id']
subnet_msg['subnet']['prefixlen'] = prefix_len
else:
subnet_msg['subnet']['cidr'] = cidr
subnet = neutron_client.create_subnet(subnet_msg)['subnet']
else:
logging.warning('Subnet %s already exists.', subnet_name)
subnet = subnets['subnets'][0]
return subnet
def create_external_subnet(neutron_client, tenant_id, network,
default_gateway=None, cidr=None,
start_floating_ip=None, end_floating_ip=None,
subnet_name='ext_net_subnet'):
subnets = neutron_client.list_subnets(name=subnet_name)
if len(subnets['subnets']) == 0:
subnet_msg = {
'name': subnet_name,
'network_id': network['id'],
'enable_dhcp': False,
'ip_version': 4,
'tenant_id': tenant_id
}
if default_gateway:
subnet_msg['gateway_ip'] = default_gateway
if cidr:
subnet_msg['cidr'] = cidr
if (start_floating_ip and end_floating_ip):
allocation_pool = {
'start': start_floating_ip,
'end': end_floating_ip,
}
subnet_msg['allocation_pools'] = [allocation_pool]
logging.info('Creating new subnet')
subnet = neutron_client.create_subnet({'subnet': subnet_msg})['subnet']
logging.info('New subnet created: %s', subnet['id'])
else:
logging.warning('Subnet %s already exists.', subnet_name)
subnet = subnets['subnets'][0]
return subnet
def update_subnet_dns(neutron_client, subnet, dns_servers):
msg = {
'subnet': {
'dns_nameservers': dns_servers.split(',')
}
}
logging.info('Updating dns_nameservers (%s) for subnet',
dns_servers)
neutron_client.update_subnet(subnet['id'], msg)
def create_provider_router(neutron_client, tenant_id):
routers = neutron_client.list_routers(name='provider-router')
if len(routers['routers']) == 0:
logging.info('Creating provider router for external network access')
router_info = {
'router': {
'name': 'provider-router',
'tenant_id': tenant_id
}
}
router = neutron_client.create_router(router_info)['router']
logging.info('New router created: %s', (router['id']))
else:
logging.warning('Router provider-router already exists.')
router = routers['routers'][0]
return router
def plug_extnet_into_router(neutron_client, router, network):
ports = neutron_client.list_ports(device_owner='network:router_gateway',
network_id=network['id'])
if len(ports['ports']) == 0:
logging.info('Plugging router into ext_net')
router = neutron_client.add_gateway_router(
router=router['id'],
body={'network_id': network['id']})
logging.info('Router connected')
else:
logging.warning('Router already connected')
def plug_subnet_into_router(neutron_client, router, network, subnet):
routers = neutron_client.list_routers(name=router)
if len(routers['routers']) == 0:
logging.error('Unable to locate provider router %s', router)
sys.exit(1)
else:
# Check to see if subnet already plugged into router
ports = neutron_client.list_ports(
device_owner='network:router_interface',
network_id=network['id'])
if len(ports['ports']) == 0:
logging.info('Adding interface from subnet to %s' % (router))
router = routers['routers'][0]
neutron_client.add_interface_router(router['id'],
{'subnet_id': subnet['id']})
else:
logging.warning('Router already connected to subnet')
def create_address_scope(neutron_client, project_id, name, ip_version=4):
"""Create address scope
:param ip_version: integer 4 or 6
:param name: string name for the address scope
"""
address_scopes = neutron_client.list_address_scopes(name=name)
if len(address_scopes['address_scopes']) == 0:
logging.info('Creating {} address scope'.format(name))
address_scope_info = {
'address_scope': {
'name': name,
'shared': True,
'ip_version': ip_version,
'tenant_id': project_id,
}
}
address_scope = neutron_client.create_address_scope(
address_scope_info)['address_scope']
logging.info('New address scope created: %s', (address_scope['id']))
else:
logging.warning('Address scope {} already exists.'.format(name))
address_scope = address_scopes['address_scopes'][0]
return address_scope
def create_subnetpool(neutron_client, project_id, name, subnetpool_prefix,
address_scope, shared=True, domain=None):
subnetpools = neutron_client.list_subnetpools(name=name)
if len(subnetpools['subnetpools']) == 0:
logging.info('Creating subnetpool: %s',
name)
subnetpool_msg = {
'subnetpool': {
'name': name,
'shared': shared,
'tenant_id': project_id,
'prefixes': [subnetpool_prefix],
'address_scope_id': address_scope['id'],
}
}
subnetpool = neutron_client.create_subnetpool(
subnetpool_msg)['subnetpool']
else:
logging.warning('Network %s already exists.', name)
subnetpool = subnetpools['subnetpools'][0]
return subnetpool
def create_bgp_speaker(neutron_client, local_as=12345, ip_version=4,
name='bgp-speaker'):
"""Create BGP Speaker
@param neutron_client: Instance of neutronclient.v2.Client
@param local_as: int Local Autonomous System Number
@returns dict BGP Speaker object
"""
bgp_speakers = neutron_client.list_bgp_speakers(name=name)
if len(bgp_speakers['bgp_speakers']) == 0:
logging.info('Creating BGP Speaker')
bgp_speaker_msg = {
'bgp_speaker': {
'name': name,
'local_as': local_as,
'ip_version': ip_version,
}
}
bgp_speaker = neutron_client.create_bgp_speaker(
bgp_speaker_msg)['bgp_speaker']
else:
logging.warning('BGP Speaker %s already exists.', name)
bgp_speaker = bgp_speakers['bgp_speakers'][0]
return bgp_speaker
def add_network_to_bgp_speaker(neutron_client, bgp_speaker, network_name):
"""Advertise network on BGP Speaker
@param neutron_client: Instance of neutronclient.v2.Client
@param bgp_speaker: dict BGP Speaker object
@param network_name: str Name of network to advertise
@returns None
"""
network_id = get_net_uuid(neutron_client, network_name)
# There is no direct way to determine which networks have already
# been advertised. For example list_route_advertised_from_bgp_speaker shows
# ext_net as FIP /32s.
# Handle the expected exception if the route is already advertised
try:
logging.info('Advertising {} network on BGP Speaker {}'
.format(network_name, bgp_speaker['name']))
neutron_client.add_network_to_bgp_speaker(bgp_speaker['id'],
{'network_id': network_id})
except neutronexceptions.InternalServerError:
logging.warning('{} network already advertised.'.format(network_name))
def create_bgp_peer(neutron_client, peer_application_name='quagga',
remote_as=10000, auth_type='none'):
"""Create BGP Peer
@param neutron_client: Instance of neutronclient.v2.Client
@param peer_application_name: str Name of juju application to find peer IP
Default: 'quagga'
@param remote_as: int Remote Autonomous System Number
@param auth_type: str BGP authentication type.
Default: 'none'
@returns dict BGP Peer object
"""
peer_unit = model.get_units(
lifecycle_utils.get_juju_model(), peer_application_name)[0]
peer_ip = peer_unit.public_address
bgp_peers = neutron_client.list_bgp_peers(name=peer_application_name)
if len(bgp_peers['bgp_peers']) == 0:
logging.info('Creating BGP Peer')
bgp_peer_msg = {
'bgp_peer': {
'name': peer_application_name,
'peer_ip': peer_ip,
'remote_as': remote_as,
'auth_type': auth_type,
}
}
bgp_peer = neutron_client.create_bgp_peer(bgp_peer_msg)['bgp_peer']
else:
logging.warning('BGP Peer %s already exists.', peer_ip)
bgp_peer = bgp_peers['bgp_peers'][0]
return bgp_peer
def add_peer_to_bgp_speaker(neutron_client, bgp_speaker, bgp_peer):
"""Setup BGP peering relationship with BGP Peer and BGP Speaker
@param neutron_client: Instance of neutronclient.v2.Client
@param bgp_speaker: dict BGP Speaker object
@param bgp_peer: dict BGP Peer object
@returns None
"""
# Handle the expected exception if the peer is already on the
# speaker
try:
logging.info('Adding peer {} on BGP Speaker {}'
.format(bgp_peer['name'], bgp_speaker['name']))
neutron_client.add_peer_to_bgp_speaker(bgp_speaker['id'],
{'bgp_peer_id': bgp_peer['id']})
except neutronexceptions.Conflict:
logging.warning('{} peer already on BGP speaker.'
.format(bgp_peer['name']))
def get_swift_codename(version):
'''Determine OpenStack codename that corresponds to swift version.'''
codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
return codenames[0]
def get_os_code_info(package, pkg_version):
# {'code_num': entry, 'code_name': OPENSTACK_CODENAMES[entry]}
# Remove epoch if it exists
if ':' in pkg_version:
pkg_version = pkg_version.split(':')[1:][0]
if 'swift' in package:
# Fully x.y.z match for swift versions
match = re.match(r'^(\d+)\.(\d+)\.(\d+)', pkg_version)
else:
# x.y match only for 20XX.X
# and ignore patch level for other packages
match = re.match(r'^(\d+)\.(\d+)', pkg_version)
if match:
vers = match.group(0)
# Generate a major version number for newer semantic
# versions of openstack projects
major_vers = vers.split('.')[0]
if (package in PACKAGE_CODENAMES and
major_vers in PACKAGE_CODENAMES[package]):
return PACKAGE_CODENAMES[package][major_vers]
else:
# < Liberty co-ordinated project versions
if 'swift' in package:
return get_swift_codename(vers)
else:
return OPENSTACK_CODENAMES[vers]
def get_current_os_versions(deployed_services):
versions = {}
for service in UPGRADE_SERVICES:
if service['name'] not in deployed_services:
continue
version = test_utils.get_pkg_version(service['name'],
service['type']['pkg'])
versions[service['name']] = get_os_code_info(service['type']['pkg'],
version)
return versions
def get_lowest_os_version(current_versions):
lowest_version = 'zebra'
for svc in current_versions.keys():
if current_versions[svc] < lowest_version:
lowest_version = current_versions[svc]
return lowest_version
def juju_get_config_keys(application):
logging.warn("Deprecated function: juju_get_config_keys. Use "
"get_application_config_keys")
return get_application_config_keys(application)
def get_application_config_keys(application):
application_config = model.get_application_config(
lifecycle_utils.get_juju_model(), application)
return list(application_config.keys())
def juju_get(application, option):
logging.warn("Deprecated function: juju_get. Use "
"get_application_config_option")
return get_application_config_option(application, option)
def get_application_config_option(application, option):
application_config = model.get_application_config(
lifecycle_utils.get_juju_model(), application)
try:
return application_config.get(option).get('value')
except AttributeError:
return None
def get_undercloud_auth():
""" Get the undercloud OpenStack authentication settings from the
environment.
@raises MissingOSAthenticationException if one or more settings are
missing.
@returns Dictionary of authentication settings
"""
os_auth_url = os.environ.get('OS_AUTH_URL')
if os_auth_url:
api_version = os_auth_url.split('/')[-1].replace('v', '')
else:
logging.error('Missing OS authentication setting: OS_AUTH_URL')
raise exceptions.MissingOSAthenticationException(
'One or more OpenStack authentication variables could not '
'be found in the environment. Please export the OS_* '
'settings into the environment.')
logging.info('AUTH_URL: {}, api_ver: {}'.format(os_auth_url, api_version))
if api_version == '2.0':
# V2
logging.info('Using keystone API V2 for undercloud auth')
auth_settings = {
'OS_AUTH_URL': os.environ.get('OS_AUTH_URL'),
'OS_TENANT_NAME': os.environ.get('OS_TENANT_NAME'),
'OS_USERNAME': os.environ.get('OS_USERNAME'),
'OS_PASSWORD': os.environ.get('OS_PASSWORD'),
'OS_REGION_NAME': os.environ.get('OS_REGION_NAME'),
'API_VERSION': 2,
}
elif api_version >= '3':
# V3 or later
logging.info('Using keystone API V3 (or later) for undercloud auth')
domain = os.environ.get('OS_DOMAIN_NAME')
auth_settings = {
'OS_AUTH_URL': os.environ.get('OS_AUTH_URL'),
'OS_USERNAME': os.environ.get('OS_USERNAME'),
'OS_PASSWORD': os.environ.get('OS_PASSWORD'),
'OS_REGION_NAME': os.environ.get('OS_REGION_NAME'),
'API_VERSION': 3,
}
if domain:
auth_settings['OS_DOMAIN_NAME'] = domain
else:
auth_settings['OS_USER_DOMAIN_NAME'] = (
os.environ.get('OS_USER_DOMAIN_NAME'))
auth_settings['OS_PROJECT_NAME'] = (
os.environ.get('OS_PROJECT_NAME'))
auth_settings['OS_PROJECT_DOMAIN_NAME'] = (
os.environ.get('OS_PROJECT_DOMAIN_NAME'))
os_project_id = os.environ.get('OS_PROJECT_ID')
if os_project_id is not None:
auth_settings['OS_PROJECT_ID'] = os_project_id
# Validate settings
for key, settings in list(auth_settings.items()):
if settings is None:
logging.error('Missing OS authentication setting: {}'
''.format(key))
raise exceptions.MissingOSAthenticationException(
'One or more OpenStack authentication variables could not '
'be found in the environment. Please export the OS_* '
'settings into the environment.')
return auth_settings
# Openstack Client helpers
def get_keystone_ip():
if juju_get('keystone', 'vip'):
return juju_get('keystone', 'vip')
unit = model.get_units(
lifecycle_utils.get_juju_model(), 'keystone')[0]
return unit.public_address
def get_auth_url():
logging.warn("Deprecated function: get_auth_url. Use get_keystone_ip")
return get_keystone_ip()
def get_overcloud_auth():
if juju_get('keystone', 'use-https').lower() == 'yes':
transport = 'https'
port = 35357
else:
transport = 'http'
port = 5000
address = get_auth_url()
os_version = get_current_os_versions('keystone')['keystone']
api_version = juju_get('keystone', 'preferred-api-version')
if os_version >= 'queens':
api_version = 3
elif api_version is None:
api_version = 2
if api_version == 2:
# V2 Explicitly, or None when charm does not possess the config key
logging.info('Using keystone API V2 for overcloud auth')
auth_settings = {
'OS_AUTH_URL': '%s://%s:%i/v2.0' % (transport, address, port),
'OS_TENANT_NAME': 'admin',
'OS_USERNAME': 'admin',
'OS_PASSWORD': 'openstack',
'OS_REGION_NAME': 'RegionOne',
'API_VERSION': 2,
}
else:
# V3 or later
logging.info('Using keystone API V3 (or later) for overcloud auth')
auth_settings = {
'OS_AUTH_URL': '%s://%s:%i/v3' % (transport, address, port),
'OS_USERNAME': 'admin',
'OS_PASSWORD': 'openstack',
'OS_REGION_NAME': 'RegionOne',
'OS_DOMAIN_NAME': 'admin_domain',
'OS_USER_DOMAIN_NAME': 'admin_domain',
'OS_PROJECT_NAME': 'admin',
'OS_PROJECT_DOMAIN_NAME': 'admin_domain',
'API_VERSION': 3,
}
return auth_settings
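
A minimal sketch of how the authentication helpers above chain together: undercloud credentials come from exported OS_* variables, a keystone session is built from them, and session-based clients hang off that session. Nothing here is exercised by the commit itself.

from zaza.utilities import openstack_utils

undercloud = openstack_utils.get_undercloud_auth()
keystone_session = openstack_utils.get_keystone_session(undercloud)

keystone = openstack_utils.get_keystone_session_client(keystone_session)
nova = openstack_utils.get_nova_session_client(keystone_session)
neutron = openstack_utils.get_neutron_session_client(keystone_session)

# e.g. list the projects visible to the undercloud credentials
print(keystone.projects.list())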


@@ -0,0 +1,153 @@
from collections import OrderedDict
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
])
OPENSTACK_CODENAMES = OrderedDict([
('2011.2', 'diablo'),
('2012.1', 'essex'),
('2012.2', 'folsom'),
('2013.1', 'grizzly'),
('2013.2', 'havana'),
('2014.1', 'icehouse'),
('2014.2', 'juno'),
('2015.1', 'kilo'),
('2015.2', 'liberty'),
('2016.1', 'mitaka'),
('2016.2', 'newton'),
('2017.1', 'ocata'),
('2017.2', 'pike'),
('2018.1', 'queens'),
])
# The ugly duckling - must list releases oldest to newest
SWIFT_CODENAMES = OrderedDict([
('diablo',
['1.4.3']),
('essex',
['1.4.8']),
('folsom',
['1.7.4']),
('grizzly',
['1.7.6', '1.7.7', '1.8.0']),
('havana',
['1.9.0', '1.9.1', '1.10.0']),
('icehouse',
['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
('juno',
['2.0.0', '2.1.0', '2.2.0']),
('kilo',
['2.2.1', '2.2.2']),
('liberty',
['2.3.0', '2.4.0', '2.5.0']),
('mitaka',
['2.5.0', '2.6.0', '2.7.0']),
('newton',
['2.8.0', '2.9.0']),
('ocata',
['2.11.0', '2.12.0', '2.13.0']),
('pike',
['2.13.0', '2.15.0']),
])
# >= Liberty version->codename mapping
PACKAGE_CODENAMES = {
'nova-common': OrderedDict([
('12', 'liberty'),
('13', 'mitaka'),
('14', 'newton'),
('15', 'ocata'),
('16', 'pike'),
('17', 'queens'),
('18', 'rocky'),
]),
'neutron-common': OrderedDict([
('7', 'liberty'),
('8', 'mitaka'),
('9', 'newton'),
('10', 'ocata'),
('11', 'pike'),
('12', 'queens'),
('13', 'rocky'),
]),
'cinder-common': OrderedDict([
('7', 'liberty'),
('8', 'mitaka'),
('9', 'newton'),
('10', 'ocata'),
('11', 'pike'),
('12', 'queens'),
('13', 'rocky'),
]),
'keystone': OrderedDict([
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
('12', 'pike'),
('13', 'queens'),
('14', 'rocky'),
]),
'horizon-common': OrderedDict([
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
('12', 'pike'),
('13', 'queens'),
('14', 'rocky'),
]),
'ceilometer-common': OrderedDict([
('5', 'liberty'),
('6', 'mitaka'),
('7', 'newton'),
('8', 'ocata'),
('9', 'pike'),
('10', 'queens'),
('11', 'rocky'),
]),
'heat-common': OrderedDict([
('5', 'liberty'),
('6', 'mitaka'),
('7', 'newton'),
('8', 'ocata'),
('9', 'pike'),
('10', 'queens'),
('11', 'rocky'),
]),
'glance-common': OrderedDict([
('11', 'liberty'),
('12', 'mitaka'),
('13', 'newton'),
('14', 'ocata'),
('15', 'pike'),
('16', 'queens'),
('17', 'rocky'),
]),
'openstack-dashboard': OrderedDict([
('8', 'liberty'),
('9', 'mitaka'),
('10', 'newton'),
('11', 'ocata'),
('12', 'pike'),
('13', 'queens'),
('14', 'rocky'),
]),
}
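
These tables are consumed by get_os_code_info() in openstack_utils; a short illustration with made-up package versions follows.

from zaza.utilities import openstack_utils

# >= Liberty: the epoch is stripped and the package's major version is
# looked up in PACKAGE_CODENAMES.
print(openstack_utils.get_os_code_info('keystone', '2:13.0.0-0ubuntu1'))
# -> 'queens'

# Swift is special-cased: the full x.y.z version is matched against
# SWIFT_CODENAMES.
print(openstack_utils.get_os_code_info('swift-proxy', '2.15.0'))
# -> 'pike'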


@@ -0,0 +1,233 @@
#!/usr/bin/env python
import logging
import os
import six
import subprocess
import yaml
from zaza import model
from zaza.charm_lifecycle import utils as lifecycle_utils
def get_network_env_vars():
"""Get environment variables with names which are consistent with
network.yaml keys; Also get network environment variables as commonly
used by openstack-charm-testing and ubuntu-openstack-ci automation.
Return a dictionary compatible with openstack-mojo-specs network.yaml
key structure."""
# Example o-c-t & uosci environment variables:
# NET_ID="a705dd0f-5571-4818-8c30-4132cc494668"
# GATEWAY="172.17.107.1"
# CIDR_EXT="172.17.107.0/24"
# CIDR_PRIV="192.168.121.0/24"
# NAMESERVER="10.5.0.2"
# FIP_RANGE="172.17.107.200:172.17.107.249"
# AMULET_OS_VIP00="172.17.107.250"
# AMULET_OS_VIP01="172.17.107.251"
# AMULET_OS_VIP02="172.17.107.252"
# AMULET_OS_VIP03="172.17.107.253"
_vars = {}
_vars['net_id'] = os.environ.get('NET_ID')
_vars['external_dns'] = os.environ.get('NAMESERVER')
_vars['default_gateway'] = os.environ.get('GATEWAY')
_vars['external_net_cidr'] = os.environ.get('CIDR_EXT')
_vars['private_net_cidr'] = os.environ.get('CIDR_PRIV')
_fip_range = os.environ.get('FIP_RANGE')
if _fip_range and ':' in _fip_range:
_vars['start_floating_ip'] = os.environ.get('FIP_RANGE').split(':')[0]
_vars['end_floating_ip'] = os.environ.get('FIP_RANGE').split(':')[1]
_vips = [os.environ.get('AMULET_OS_VIP00'),
os.environ.get('AMULET_OS_VIP01'),
os.environ.get('AMULET_OS_VIP02'),
os.environ.get('AMULET_OS_VIP03')]
# Env var naming consistent with network.yaml takes priority
_keys = ['default_gateway',
'start_floating_ip',
'end_floating_ip',
'external_dns',
'external_net_cidr',
'external_net_name',
'external_subnet_name',
'network_type',
'private_net_cidr',
'router_name']
for _key in _keys:
_val = os.environ.get(_key)
if _val:
_vars[_key] = _val
# Remove keys and items with a None value
_vars['vips'] = [_f for _f in _vips if _f]
for k, v in list(_vars.items()):
if not v:
del _vars[k]
return _vars
def dict_to_yaml(dict_data):
return yaml.dump(dict_data, default_flow_style=False)
def get_yaml_config(config_file):
# Note in its original form get_mojo_config it would do a search pattern
# through mojo stage directories. This version assumes the yaml file is in
# the pwd.
logging.info('Using config %s' % (config_file))
return yaml.load(open(config_file, 'r').read())
def get_net_info(net_topology, ignore_env_vars=False):
"""Get network info from network.yaml, override the values if specific
environment variables are set."""
net_info = get_yaml_config('network.yaml')[net_topology]
if not ignore_env_vars:
logging.info('Consuming network environment variables as overrides.')
net_info.update(get_network_env_vars())
logging.info('Network info: {}'.format(dict_to_yaml(net_info)))
return net_info
def parse_arg(options, arg, multiargs=False):
if arg.upper() in os.environ:
if multiargs:
return os.environ[arg.upper()].split()
else:
return os.environ[arg.upper()]
else:
return getattr(options, arg)
def remote_run(unit, remote_cmd=None, timeout=None, fatal=None):
logging.warn("Deprecate as soons as possible. Use model.run_on_unit() as "
"soon as libjuju unit.run returns output.")
if fatal is None:
fatal = True
cmd = ['juju', 'run', '--unit', unit]
if timeout:
cmd.extend(['--timeout', str(timeout)])
if remote_cmd:
cmd.append(remote_cmd)
else:
cmd.append('uname -a')
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
output = p.communicate()
if six.PY3:
output = (output[0].decode('utf-8'), output[1])
if p.returncode != 0 and fatal:
raise Exception('Error running remote command')
return output
def get_pkg_version(application, pkg):
versions = []
units = model.get_units(
lifecycle_utils.get_juju_model(), application)
for unit in units:
cmd = 'dpkg -l | grep {}'.format(pkg)
out = remote_run(unit.entity_id, cmd)
versions.append(out[0].split()[2])
if len(set(versions)) != 1:
raise Exception('Unexpected output from pkg version check')
return versions[0]
def get_cloud_from_controller():
""" Get the cloud name from the Juju 2.x controller
@returns String name of the cloud for the current Juju 2.x controller
"""
cmd = ['juju', 'show-controller', '--format=yaml']
output = subprocess.check_output(cmd)
if six.PY3:
output = output.decode('utf-8')
cloud_config = yaml.load(output)
# There will only be one top level controller from show-controller,
# but we do not know its name.
assert len(cloud_config) == 1
try:
return list(cloud_config.values())[0]['details']['cloud']
except KeyError:
raise KeyError("Failed to get cloud information from the controller")
def get_provider_type():
""" Get the type of the undercloud
@returns String name of the undercloud type
"""
juju_env = subprocess.check_output(['juju', 'switch'])
if six.PY3:
juju_env = juju_env.decode('utf-8')
juju_env = juju_env.strip('\n')
cloud = get_cloud_from_controller()
if cloud:
# If the controller was deployed from this system with
# the cloud configured in ~/.local/share/juju/clouds.yaml
# Determine the cloud type directly
cmd = ['juju', 'show-cloud', cloud, '--format=yaml']
output = subprocess.check_output(cmd)
if six.PY3:
output = output.decode('utf-8')
return yaml.load(output)['type']
else:
# If the controller was deployed elsewhere
# show-controllers unhelpfully returns an empty string for cloud
# For now assume openstack
return 'openstack'
def get_full_juju_status():
status = model.get_status(lifecycle_utils.get_juju_model())
return status
def get_application_status(application=None, unit=None):
status = get_full_juju_status()
if application:
status = status.applications.get(application)
if unit:
status = status.units.get(unit)
return status
def get_machine_status(machine, key=None):
status = get_full_juju_status()
status = status.machines.get(machine)
if key:
status = status.get(key)
return status
def get_machines_for_application(application):
status = get_application_status(application)
machines = []
for unit in status.get('units').keys():
machines.append(
status.get('units').get(unit).get('machine'))
return machines
def get_machine_uuids_for_application(application):
uuids = []
for machine in get_machines_for_application(application):
uuids.append(get_machine_status(machine, key='instance-id'))
return uuids
def setup_logging():
logFormatter = logging.Formatter(
fmt="%(asctime)s [%(levelname)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S")
rootLogger = logging.getLogger()
rootLogger.setLevel('INFO')
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
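
A sketch of the status and package helpers above in combination; "neutron-gateway" and "neutron-common" mirror names used elsewhere in this commit and assume such an application is actually deployed.

from zaza.utilities import test_utils

test_utils.setup_logging()

# Provider instance IDs for every machine hosting the application.
print(test_utils.get_machine_uuids_for_application('neutron-gateway'))

# Installed package version, collected via `juju run ... dpkg -l`.
print(test_utils.get_pkg_version('neutron-gateway', 'neutron-common'))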