Compare commits

4 Commits

Author SHA1 Message Date
994c15f18c Update cloud-init 2020-12-25 17:43:43 +00:00
d3453fcab9 Add user_dats file and update password 2020-12-25 17:03:47 +00:00
74c75cb9b7 Update config, and add deploying of hypervisor node 2020-12-25 15:48:50 +00:00
643a35fce2 Start of snap stuff for maas 2020-12-24 20:25:58 +00:00
29 changed files with 648 additions and 1706 deletions

1
.gitignore vendored
View File

@@ -1,4 +1,3 @@
maas.config
maas.debconf
*.xml
*.yaml

View File

@@ -4,15 +4,13 @@ This is a quick-and-dirty set of shell scripts that will build out and
bootstrap a MAAS environment with all of the bits and pieces you need to get
it running for any cloud, any workload.
* `manage-vm-nodes.sh.........`: Create kvm instances that MAAS will manage
* `manage-hypervisor-nodes.sh.`: Create hypervisors that MAAS will manage
* `bootstrap-maas.sh..........`: Build and bootstrap your MAAS environment
* `functions.sh...............`: Common functions that the first 2 scripts use
* `user-data.yaml.............`: cloud-init for hypervisor nodes
* `manage-maas-nodes`...: Create kvm instances that MAAS will manage
* `bootstrap--maas.sh`..: Build and bootstrap your MAAS environment
There are plenty of options to customize its behavior, as well as drop in to
any step of the process without rebuilding the full MAAS from scratch.
## Requirements
Requires, minimally, `bash`, `jq` and a working Ubuntu environment. This
@@ -20,7 +18,8 @@ has **not** been tested on CentOS or Debian, but should work minimally on
those environments, if you choose to make that your host. Patches are
welcome, of course.
## Components - bootstrap-maas.sh
## Components
```
-a <cloud_name> Do EVERYTHING (maas, juju cloud, juju bootstrap)
@@ -33,46 +32,22 @@ welcome, of course.
-t <cloud_name> Tear down the cloud named <cloud_name>
```
## Components - manage-hypervisor-nodes.sh
```
-a <node> Create and Deploy
-c <node> Creates Hypervisor
-d <node> Deploy Hypervisor
-k <node> Add Hypervisor as Pod
-n <node> Assign Networks
-p <node> Update Partitioning
-w <node> Removes Hypervisor
```
## Components - manage-maas-nodes.sh
```
-c Creates everything
-w Removes everything
-d Releases VMs, Clears Disk
-n Updates all the networks on all VMs
-r Recommission all VMs
-j Only create juju VM
-z Adds the machines to their respective zones
```
## Misc - functions.sh
Many functions that are common between the 2 scripts above
## Misc - user-data.yaml
`cloud-init` file, that helps with deployment of the hypervisors. This helps
to automate the deployment of the hypervisor, which in turn grabs this repo
and deploys all the VMs required.
## Installing and testing MAAS
## Installing and testing MAAS
Just run `./bootstrap-maas.sh` with the appropriate option above.
Minimally, you'll want to use `./bootstrap-maas.sh -b` or `-i` to install
just the components needed.
I've done all the work needed to make this as idempotent as possible. It
will need some minor tweaks to get working with MAAS 2.4.x, because of the
will need some minor tweaks to get working with MAAS 2.4.x, because of the
newer PostgreSQL dependencies.
MAAS from snap is also not supported (yet) again for the same SQL
dependencies which are included inside the MAAS snap.
## TODO and What's Next
* Support for using MAAS from snap vs. main or PPA. With snap, postgresql
and other deps are installed in the snap, so handling has to change

14
TODO.md
View File

@@ -1,14 +0,0 @@
# Todo
1. ~~add the hypervisor as a flag rather than the configuration file when adding
hypervisors.~~
1. ~~Add script to cloud-init, that will allow to wipe disk. This will allow to
reboot the hypervisor, such that we don't have to log back in after the
machine has been re-commissioned.~~
1. ~~Update hypervisor config such that it works with focal~~
1. Add the ability to add multiple storage pools (1 x SSD, 1 x HDD)
1. ~~Update `boostrap-maas.sh`~~
1. ~~Snap implementation~~
1. ~~Adding VLANs and subnets~~
1. ~~Adding spaces~~

View File

@@ -1,105 +0,0 @@
#!/bin/bash
domain_name="example.com"
. functions.sh
setup_domain()
{
domains=$(maas ${maas_profile} domains read)
my_domain=$(echo $domains | jq '.[] | select(.name=="'${domain_name}'")')
if [[ -z $my_domain ]] ; then
maas ${maas_profile} domains create name=${domain_name}
fi
}
get_ip_from_juju()
{
from_app=""
case $dns_name in
"landscape")
juju_name="landscape-haproxy"
from_app="true"
model="-m lma"
;;
"graylog"|"nagios")
juju_name=${dns_name}
from_app="true"
model="-m lma"
;;
"dashboard")
juju_name="openstack-dashboard"
;;
"mysql")
juju_name="mysql-innodb-cluster"
;;
"neutron")
juju_name="neutron-api"
;;
"nova")
juju_name="nova-cloud-controller"
;;
*)
juju_name=${dns_name}
;;
esac
[[ -n "$from_app" ]] && juju status ${model} ${juju_name} --format json | jq .applications[\"${juju_name}\"].units[][\"public-address\"] | sed s/\"//g
[[ -z "$from_app" ]] && juju config ${model} ${juju_name} vip
}
add_record()
{
dns_name=$1
maas_dns_ip=$2
dns_name_result=$(maas ${maas_profile} dnsresources read name=${dns_name}-internal)
if [[ -n $(echo $dns_name_result | jq .[]) ]] ; then
dns_id=$(echo $dns_name_result | jq .[].id)
dns_ip=$(maas ${maas_profile} dnsresource update ${dns_id} fqdn=${dns_name}-internal.${domain_name} ip_addresses=${maas_dns_ip})
else
dns_ip=$(maas ${maas_profile} dnsresources create fqdn=${dns_name}-internal.${domain_name} ip_addresses=${maas_dns_ip})
fi
dns_cname_result=$(maas ${maas_profile} dnsresource-records read rrtype=CNAME name=${dns_name})
if [[ -n $(echo $dns_cname_result | jq .[]) ]] ; then
dns_id=$(echo $dns_cname_result | jq .[].id)
dns_cname=$(maas ${maas_profile} dnsresource-record update ${dns_id} rrtype=cname rrdata=${dns_name}-internal.${domain_name}.)
else
dns_cname=$(maas ${maas_profile} dnsresource-records create fqdn=${dns_name}.${domain_name} rrtype=cname rrdata=${dns_name}-internal.${domain_name}.)
fi
}
do_rmq_nodes()
{
rmq_ips=$(juju status --format json | jq -rc '.applications["rabbitmq-server"].units | to_entries[] | .value["public-address"]')
i=1
for ip in ${rmq_ips}
do
dns_name=rabbit-${i}
add_record ${dns_name} ${ip}
(( i++ ))
done
}
read_configs
maas_login
setup_domain
for app in ${maas_dns_names[*]} landscape graylog nagios ; do
add_record ${app} $(get_ip_from_juju $dns_name)
done
do_rmq_nodes

View File

@@ -1,3 +1,7 @@
hypervisor_name=asrock01
hypervisor_ip=10.0.1.253
hypervisor_mac="a8:a1:59:44:70:ac"
external_vlan=1
qemu_connection="qemu+ssh://virsh@${hypervisor_ip}/system"
@@ -5,10 +9,13 @@ qemu_password="SgUoBByKjG9Lj78SwfWAHXD8DvgE67Cu"
storage_path="/var/lib/libvirt/maas-images"
compute="as1-maas-node"
control_count=1
control_cpus=3
control_ram=8192
node_count=11
node_start=1
node_cpus=2
node_ram=4096

31
asrock02.config Normal file
View File

@@ -0,0 +1,31 @@
hypervisor_ip=10.0.1.251
hypervisor_mac="a8:a1:59:44:76:79"
qemu_connection="qemu+ssh://virsh@${hypervisor_ip}/system"
qemu_password="FPEW2H2hc2ot.HNinxpgGIGM88DI2be7"
storage_path="/var/lib/libvirt/maas-images"
compute="as2-maas-node"
control_count=1
control_cpus=3
control_ram=8192
node_count=11
node_start=1
node_cpus=2
node_ram=4096
disks=()
disks+=(50)
disks+=(20)
disks+=(20)
network_type=bridge
bridges=()
bridges+=("br0")
bridges+=("br1")
bridges+=("br2")
bridges+=("br3")
bridges+=("br4")

View File

@@ -1,8 +1,5 @@
#!/bin/bash
# set -x
. functions.sh
required_bins=( ip sudo debconf-set-selections )
check_bins() {
@@ -20,15 +17,15 @@ check_bins() {
}
read_config() {
if [ ! -f configs/maas.config ]; then
if [ ! -f maas.config ]; then
printf "Error: missing config file. Please create the file 'maas.config'.\n"
exit 1
else
shopt -s extglob
maas_config="configs/maas.config"
maas_config="maas.config"
source "$maas_config"
fi
if [[ $maas_pkg_type != "snap" ]] && [ ! -f maas.debconf ]; then
if [ ! -f maas.debconf ]; then
printf "Error: missing debconf file. Please create the file 'maas.debconf'.\n"
exit 1
fi
@@ -41,12 +38,12 @@ init_variables() {
core_packages=( jq moreutils uuid )
maas_packages=( maas maas-cli maas-proxy maas-dhcp maas-dns maas-rack-controller maas-region-api maas-common )
pg_packages=( postgresql postgresql-client postgresql-client-common postgresql-common )
pg_packages=( postgresql-10 postgresql-client postgresql-client-common postgresql-common )
maas_snaps=( maas maas-test-db )
}
remove_maas_deb() {
remove_maas() {
# Drop the MAAS db ("maasdb"), so we don't risk reusing it
sudo -u postgres psql -c "select pg_terminate_backend(pid) from pg_stat_activity where datname='maasdb'"
sudo -u postgres psql -c "drop database maasdb"
@@ -59,17 +56,13 @@ remove_maas_deb() {
for package in "${maas_packages[@]}" "${pg_packages[@]}"; do
sudo dpkg -P "$package"
done
sudo apt-add-repository ppa:maas/${maas_version} -y -r
}
remove_maas_snap() {
sudo snap remove --purge ${maas_snaps[@]}
sudo snap remove ${maas_snaps[@]}
}
install_maas_deb() {
sudo apt-add-repository ppa:maas/${maas_version} -y
install_maas() {
# This is separate from the removal, so we can handle them atomically
sudo apt-get -fuy --reinstall install "${core_packages}" "${maas_packages[@]}" "${pg_packages[@]}"
sudo sed -i 's/DISPLAY_LIMIT=5/DISPLAY_LIMIT=100/' /usr/share/maas/web/static/js/bundle/maas-min.js
@@ -77,11 +70,7 @@ install_maas_deb() {
install_maas_snap() {
sudo apt-get -fuy --reinstall install "${core_packages}"
# When we specify the channel, we have to install the snaps individually
for snap in ${maas_snaps[*]} ; do
sudo snap install ${snap} --channel=$maas_version/stable
done
sudo snap install ${maas_snaps[@]}
}
purge_admin_user() {
@@ -94,122 +83,72 @@ with deleted_user as (delete from auth_user where username = '$maas_profile' ret
delete from piston3_consumer where user_id = (select id from deleted_user);
EOF
[[ $maas_pkg_type == "snap" ]] && maas-test-db.psql -c "$purgeadmin" maasdb
[[ $maas_pkg_type == "deb" ]] && sudo -u postgres psql -c "$purgeadmin" maasdb
psql_cmd="psql"
[[ $maas_pkg_type == "snap" ]] && psql_cmd="maas-test-db.psql"
sudo -u postgres $psql_cmd -c "$purgeadmin" maasdb
}
build_maas() {
# Create the initial 'admin' user of MAAS, purge first!
purge_admin_user
[[ $maas_pkg_type == "snap" ]] && maas init region+rack --database-uri maas-test-db:/// --maas-url $maas_endpoint --force
sudo maas createadmin --username "$maas_profile" --password "$maas_pass" --email "$maas_profile"@"$maas_pass" --ssh-import lp:"$launchpad_user"
if [[ $maas_pkg_type == "deb" ]] ; then
sudo chsh -s /bin/bash maas
sudo chown -R maas:maas /var/lib/maas
fi
sudo chsh -s /bin/bash maas
sudo chown -R maas:maas /var/lib/maas
if [ -f ~/.maas-api.key ]; then
rm ~/.maas-api.key
fi
maas_cmd="maas-region"
[[ $maas_pkg_type == "snap" ]] && maas_cmd="maas"
maas_api_key="$(sudo ${maas_cmd} apikey --username $maas_profile | head -n 1 | tee ~/.maas-api.key)"
[[ $maas_pkg_type == "deb" ]] && maas_api_key="$(sudo maas-region apikey --username=$maas_profile | tee ~/.maas-api.key)"
[[ $maas_pkg_type == "snap" ]] && maas_api_key="$(sudo maas apikey --username $maas_profile | head -n 1 | tee ~/.maas-api.key)"
fi;
# Fetch the MAAS API key, store to a file for later reuse, also set this var to that value
maas login "$maas_profile" "$maas_endpoint" "$maas_api_key"
maas_system_id="$(maas $maas_profile nodes read hostname="$HOSTNAME" | jq -r '.[].interface_set[0].system_id')"
# Inject the maas SSH key if it exists
if [ -f ~/.ssh/maas_rsa.pub ]; then
maas_ssh_key=$(<~/.ssh/maas_rsa.pub)
maas $maas_profile sshkeys create "key=$maas_ssh_key"
fi
# Inject the maas SSH key
maas_ssh_key=$(<~/.ssh/maas_rsa.pub)
maas $maas_profile sshkeys create "key=$maas_ssh_key"
# Update settings to match our needs
maas $maas_profile maas set-config name=default_storage_layout value=lvm
maas $maas_profile maas set-config name=network_discovery value=disabled
maas $maas_profile maas set-config name=active_discovery_interval value=0
maas $maas_profile maas set-config name=kernel_opts value="console=ttyS0,115200 console=tty0,115200 elevator=noop zswap.enabled=1 zswap.compressor=lz4 zswap.max_pool_percent=20 zswap.zpool=z3fold intel_iommu=on iommu=pt debug nosplash scsi_mod.use_blk_mq=1 dm_mod.use_blk_mq=1 enable_mtrr_cleanup mtrr_spare_reg_nr=1 systemd.log_level=debug"
maas $maas_profile maas set-config name=maas_name value=us-east
maas $maas_profile maas set-config name=upstream_dns value="$maas_upstream_dns"
maas $maas_profile maas set-config name=dnssec_validation value=no
maas $maas_profile maas set-config name=enable_analytics value=false
maas $maas_profile maas set-config name=enable_http_proxy value=true
# maas $maas_profile maas set-config name=http_proxy value="$squid_proxy"
maas $maas_profile maas set-config name=enable_third_party_drivers value=false
maas $maas_profile maas set-config name=curtin_verbose value=true
[[ -n "$maas_upstream_dns" ]] && maas $maas_profile maas set-config name=upstream_dns value="${maas_upstream_dns}"
[[ -n "$maas_kernel_opts" ]] && maas $maas_profile maas set-config name=kernel_opts value="${maas_kernel_opts}"
[[ -n "$maas_name" ]] && maas $maas_profile maas set-config name=maas_name value=${maas_name}
if [[ -n "$squid_proxy" ]] ; then
maas $maas_profile maas set-config name=enable_http_proxy value=true
maas $maas_profile maas set-config name=http_proxy value="$squid_proxy"
fi
[[ -n "$maas_boot_source" ]] && maas $maas_profile boot-source update 1 url="$maas_boot_source"
[[ -n "$package_repository" ]] && maas $maas_profile package-repository update 1 name='main_archive' url="$package_repository"
# Ensure that we are only grabbing amd64 and not other arches as well
maas $maas_profile boot-source-selection update 1 1 arches="amd64"
# The release that is is downloading by default
default_release=$(maas $maas_profile boot-source-selection read 1 1 | jq -r .release)
# Add bionic if the default is focal, or vice-versa
[[ $default_release == "focal" ]] && other_release="bionic"
[[ $default_release == "bionic" ]] && other_release="focal"
[[ -n "$other_release" ]] && maas ${maas_profile} boot-source-selections create 1 os="ubuntu" release="${other_release}" arches="amd64" subarches="*" labels="*"
# Import the base images; this can take some time
echo "Importing boot images, please be patient, this may take some time..."
maas $maas_profile boot-resources import
maas $maas_profile boot-source update 1 url="$maas_boot_source"
# maas $maas_profile boot-source update 1 url=http://"$maas_bridge_ip":8765/maas/images/ephemeral-v3/daily/
maas $maas_profile package-repository update 1 name='main_archive' url="$package_repository"
# This is hacky, but it's the only way I could find to reliably get the
# correct subnet for the maas bridge interface
maas $maas_profile subnet update "$(maas $maas_profile subnets read | jq -rc --arg maas_ip "$maas_ip_range" '.[] | select(.name | contains($maas_ip)) | "\(.id)"')" \
gateway_ip="$maas_bridge_ip" dns_servers="$maas_bridge_ip"
maas $maas_profile subnet update "$(maas $maas_profile subnets read | jq -rc --arg maas_ip "$maas_ip_range" '.[] | select(.name | contains($maas_ip)) | "\(.id)"')" gateway_ip="$maas_bridge_ip"
sleep 3
i=0
for space in ${maas_spaces[*]} ; do
fabric_id=$(maas admin fabrics read | jq -c ".[] | {id:.id, vlan:.vlans[].vid, fabric:.name}" | grep fabric-0 | jq ".id")
space_object=$(maas ${maas_profile} spaces create name=${space})
echo $space_object | jq .
space_id=$(echo $space_object | jq ".id")
vlan_object=$(maas ${maas_profile} vlans create fabric-0 vid=${maas_vlans[$i]} space=${space_id})
echo $vlan_object | jq .
vlan_id=$(echo $vlan_object | jq ".id")
subnet_id=$(maas $maas_profile subnets read | jq -rc --arg maas_ip "${maas_subnets[$i]}" '.[] | select(.name | contains($maas_ip)) | "\(.id)"')
maas ${maas_profile} subnet update $subnet_id vlan=${vlan_id}
maas $maas_profile ipranges create type=dynamic start_ip="$maas_subnet_start" end_ip="$maas_subnet_end" comment='This is the reserved range for MAAS nodes'
maas_int_id=$(maas ${maas_profile} interfaces read ${maas_system_id} | jq -rc --arg int_ip "${maas_subnets[$i]}" '.[] | select(.links[].subnet.name | contains($int_ip)) | "\(.id)"')
maas ${maas_profile} interface update ${maas_system_id} ${maas_int_id} vlan=${vlan_id}
if [[ $space != "external" ]] ; then
maas ${maas_profile} ipranges create type=dynamic start_ip="${maas_subnets[$i]}.${maas_dhcp_start_postfix}" end_ip="${maas_subnets[$i]}.${maas_dhcp_end_postfix}"
maas $maas_profile vlan update fabric-0 ${maas_vlans[$i]} dhcp_on=True primary_rack="$maas_system_id"
# Force MAAS to manage all subnets except for external
maas $maas_profile subnet update $subnet_id managed=False
maas $maas_profile subnet update $subnet_id managed=True
fi
(( i++ ))
done
sleep 3
maas $maas_profile vlan update fabric-1 0 dhcp_on=True primary_rack="$maas_system_id"
if [[ $maas_pkg_type == "deb" ]]; then
# This is needed, because it points to localhost by default and will fail to
# commission/deploy in this state
echo "DEBUG: ${maas_endpoint}"
echo "DEBUG: http://$maas_bridge_ip:5240/MAAS/"
sudo debconf-set-selections maas.debconf
sleep 2
# sudo maas-rack config --region-url "${maas_endpoint}" && sudo service maas-rackd restart
# sudo maas-rack config --region-url "http://$maas_bridge_ip:5240/MAAS/" && sudo service maas-rackd restart
sudo DEBIAN_FRONTEND=noninteractive dpkg-reconfigure maas-rack-controller
sleep 2
@@ -220,11 +159,14 @@ build_maas() {
}
bootstrap_maas() {
# Import the base images; this can take some time
echo "Importing boot images, please be patient, this may take some time..."
maas $maas_profile boot-resources import
until [ "$(maas $maas_profile boot-resources is-importing)" = false ]; do sleep 3; done;
# Add a chassis with nodes we want to build against
[[ -n "$virsh_chassis" ]] && maas $maas_profile machines add-chassis chassis_type=virsh prefix_filter=maas-node hostname="$virsh_chassis"
maas $maas_profile machines add-chassis chassis_type=virsh prefix_filter=maas-node hostname="$virsh_chassis"
# This is necessary to allow MAAS to quiesce the imported chassis
echo "Pausing while chassis is imported..."
@@ -237,43 +179,13 @@ bootstrap_maas() {
# maas_node=$(maas $maas_profile machines read | jq -r '.[0].system_id')
# maas "$maas_profile" machine commission -d "$maas_node"
# Acquire all machines marked "Ready"
# maas $maas_profile machines allocate
# Acquire all images marked "Ready"
maas $maas_profile machines allocate
# Deploy the node you just commissioned and acquired
# maas "$maas_profile" machine deploy $maas_node
}
add_dns_records()
{
i=0
for dns_name in ${maas_dns_names[*]} ; do
add_records=""
dns_name_result=$(maas ${maas_profile} dnsresources read name=${dns_name}-internal)
if [[ -n $(echo $dns_name_result | jq .[]) ]] ; then
dns_id=$(echo $dns_name_result | jq .[].id)
dns_ip=$(maas ${maas_profile} dnsresource update ${dns_id} fqdn=${dns_name}-internal.example.com ip_addresses=${maas_ip_range}.${maas_dns_ips[$i]})
else
dns_ip=$(maas ${maas_profile} dnsresources create fqdn=${dns_name}-internal.example.com ip_addresses=${maas_ip_range}.${maas_dns_ips[$i]})
fi
dns_cname_result=$(maas ${maas_profile} dnsresource-records read rrtype=CNAME name=${dns_name})
if [[ -n $(echo $dns_cname_result | jq .[]) ]] ; then
dns_id=$(echo $dns_cname_result | jq .[].id)
dns_cname=$(maas ${maas_profile} dnsresource-record update ${dns_id} rrtype=cname rrdata=${dns_name}-internal.example.com.)
else
dns_cname=$(maas ${maas_profile} dnsresource-records create fqdn=${dns_name}.example.com rrtype=cname rrdata=${dns_name}-internal.example.com.)
fi
(( i++ ))
done
}
# These are for juju, adding a cloud matching the customer/reproducer we need
add_cloud() {
@@ -282,9 +194,7 @@ add_cloud() {
fi
rand_uuid=$(uuid -F siv)
cloud_name="$1"
if [ -f ~/.maas-api.key ]; then
maas_api_key=$(<~/.maas-api.key)
fi
maas_api_key=$(<~/.maas-api.key)
cat > clouds-"$rand_uuid".yaml <<EOF
clouds:
@@ -292,29 +202,19 @@ clouds:
type: maas
auth-types: [ oauth1 ]
description: MAAS cloud for $cloud_name
# endpoint: ${maas_endpoint:0:-8}
endpoint: $maas_endpoint
config:
enable-os-refresh-update: true
enable-os-upgrade: false
logging-config: <root>=DEBUG
EOF
if [[ -n "$package_repository" ]] ; then
cat >> clouds-"$rand_uuid".yaml <<EOF
apt-mirror: $package_repository
EOF
fi
# Only add the proxy stuff if it's set
if [[ -n "$squid_proxy" ]] ; then
cat >> clouds-"$rand_uuid".yaml <<EOF
apt-http-proxy: $squid_proxy
apt-https-proxy: $squid_proxy
snap-http-proxy: $squid_proxy
snap-https-proxy: $squid_proxy
# snap-store-proxy: $snap_store_proxy
enable-os-refresh-update: true
enable-os-upgrade: false
logging-config: <root>=DEBUG
EOF
fi
cat > credentials-"$rand_uuid".yaml <<EOF
credentials:
@@ -327,49 +227,35 @@ EOF
cat > config-"$rand_uuid".yaml <<EOF
automatically-retry-hooks: true
mongo-memory-profile: default
default-series: $juju_bootstrap_series
transmit-vendor-metrics: false
EOF
# Only add the proxy stuff if it's set
if [[ -n "$squid_proxy" ]] ; then
cat >> config-"$rand_uuid".yaml <<EOF
default-series: bionic
juju-ftp-proxy: $squid_proxy
juju-http-proxy: $squid_proxy
juju-https-proxy: $squid_proxy
juju-no-proxy: $no_proxy
apt-http-proxy: $squid_proxy
apt-https-proxy: $squid_proxy
transmit-vendor-metrics: false
EOF
fi
local="--local"
local="--client"
echo "Adding cloud............: $cloud_name"
# juju add-cloud --replace "$cloud_name" clouds-"$rand_uuid".yaml
juju update-cloud "$cloud_name" ${local} -f clouds-"$rand_uuid".yaml
juju update-cloud "$cloud_name" -f clouds-"$rand_uuid".yaml
echo "Adding credentials for..: $cloud_name"
#juju add-credential --replace "$cloud_name" -f credentials-"$rand_uuid".yaml
juju add-credential "$cloud_name" ${local} -f credentials-"$rand_uuid".yaml
juju add-credential "$cloud_name" -f credentials-"$rand_uuid".yaml
echo "Details for cloud.......: $cloud_name..."
juju clouds ${local} --format json | jq --arg cloud "$cloud_name" '.[$cloud]'
juju clouds --format json | jq --arg cloud "$cloud_name" '.[$cloud]'
juju bootstrap "$cloud_name" --debug --config=config-"$rand_uuid".yaml \
--model-default image-metadata-url=http://192.168.1.12/lxd/ \
--model-default agent-metadata-url=http://192.168.1.12/juju/tools/ \
--constraints "tags=juju"
juju bootstrap "$cloud_name" --debug --config=config-"$rand_uuid".yaml
# Since we created ephemeral files, let's wipe them out. Comment if you want to keep them around
if [[ $? = 0 ]]; then
rm -f clouds-"$rand_uuid".yaml credentials-"$rand_uuid".yaml config-"$rand_uuid".yaml
rm -f clouds-"$rand_uuid".yaml credentials-"$rand_uuid".yaml config-"$rand_uuid".yaml
fi
# Only enable HA if the variable is set and true
[[ -n "$juju_ha" ]] && [[ $juju_ha == "true" ]] && juju enable-ha --constraints "tags=juju"
juju enable-ha
juju machines -m controller
}
@@ -379,6 +265,7 @@ destroy_cloud() {
juju --debug clouds --format json | jq --arg cloud "$cloud_name" '.[$cloud]'
juju --debug remove-cloud "$cloud_name"
}
show_help() {
@@ -386,7 +273,6 @@ show_help() {
-a <cloud_name> Do EVERYTHING (maas, juju cloud, juju bootstrap)
-b Build out and bootstrap a new MAAS
-d Add DNS records for VIPs
-c <cloud_name> Add a new cloud + credentials
-i Just install the dependencies and exit
-j <name> Bootstrap the Juju controller called <name>
@@ -404,18 +290,18 @@ if [ $# -eq 0 ]; then
fi
# Load up some initial variables from the config and package arrays
read_config
init_variables
read_config
# This is the proxy that MAAS itself uses (the "internal" MAAS proxy)
no_proxy="localhost,127.0.0.1,$maas_system_ip,$(echo $maas_ip_range.{100..200} | sed 's/ /,/g')"
while getopts ":a:bc:dij:nt:r" opt; do
while getopts ":a:bc:ij:nt:r" opt; do
case $opt in
a )
check_bins
remove_maas_${maas_pkg_type}
install_maas_${maas_pkg_type}
remove_maas
install_maas
build_maas
bootstrap_maas
add_cloud "$OPTARG"
@@ -423,7 +309,7 @@ while getopts ":a:bc:dij:nt:r" opt; do
b )
echo "Building out a new MAAS server"
check_bins
install_maas_${maas_pkg_type}
install_maas
build_maas
bootstrap_maas
exit 0
@@ -433,13 +319,9 @@ while getopts ":a:bc:dij:nt:r" opt; do
init_variables
add_cloud "$OPTARG"
;;
d )
maas_login
add_dns_records
;;
i )
echo "Installing MAAS and PostgreSQL dependencies"
install_maas_${maas_pkg_type}
install_maas
exit 0
;;
j )
@@ -448,7 +330,7 @@ while getopts ":a:bc:dij:nt:r" opt; do
exit 0
;;
r )
remove_maas_${maas_pkg_type}
remove_maas
exit 0
;;
t )

View File

@@ -1,16 +0,0 @@
hypervisor_name=asrock01
hypervisor_ip=10.0.1.241
hypervisor_mac="a8:a1:59:44:70:ac"
hypervisor_tag="asrock"
ip_suffix=241
external_ip=192.168.1.211
compute="as1-maas-node"
# Disks that exist on the server
disk_name=()
disk_names+=(sda)
disk_names+=(nvme0n1)
#disk_names+=(nvme1n1)

View File

@@ -1,15 +0,0 @@
hypervisor_name=asrock02
hypervisor_ip=10.0.1.242
hypervisor_mac="a8:a1:59:44:76:79"
hypervisor_tag="asrock"
ip_suffix=242
external_ip=192.168.1.212
compute="as2-maas-node"
# Disks that exist on the server
disk_name=()
disk_names+=(sda)
disk_names+=(nvme0n1)

View File

@@ -1,17 +0,0 @@
hypervisor_name=asrock03
hypervisor_ip=10.0.1.243
hypervisor_mac="9c:6b:00:3f:98:29"
hypervisor_tag="asrock"
ip_suffix=243
external_ip=192.168.1.213
compute="as3-maas-node"
sunbeam_count=1
# Disks that exist on the server
disk_name=()
disk_names+=(sda)
disk_names+=(nvme0n1)

View File

@@ -1,15 +0,0 @@
hypervisor_name=asrock04
hypervisor_ip=10.0.1.244
hypervisor_mac="a8:a1:59:e4:92:b8"
hypervisor_tag="asrock"
ip_suffix=244
external_ip=192.168.1.214
compute="as4-maas-node"
# Disks that exist on the server
disk_name=()
disk_names+=(sda)
disk_names+=(nvme0n1)

View File

@@ -1,84 +0,0 @@
external_vlan=1
qemu_password="SgUoBByKjG9Lj78SwfWAHXD8DvgE67Cu"
virsh_user="virsh"
storage_path="/var/lib/libvirt/maas-images"
ceph_storage_path="/var/lib/libvirt/ceph-maas-images"
# Node prefix for all the nodes except for juju
compute="maas-node"
# To enable or disable uEFI for VMs
enable_uefi="false"
# To enable secureboot for VMs
# This inadvertently enables uEFI
# EXPERIMENTAL/TESTING
enable_secureboot="false"
# Juju node count and definition
juju_count=1
juju_cpus=2
juju_ram=4096
juju_disk=20
# Control node count and definition
control_count=2
control_cpus=3
control_ram=16384
# Compute node definition
node_start=1
node_cpus=2
node_ram=4096
# Sunbeam node count and definition
sunbeam_count=0
sunbeam_cpus=3
sunbeam_ram=16384
# Disks to create on each VM
disks=()
disks+=(150)
disks+=(20)
disks+=(20)
# The network type, you have two options, bridge and network
# Bridge, will then assign each interface to the bridged identified in
# the bridges array.
# Network, will assign each interface to the virsh network that has been
# defined in the networks array.
network_type=bridge
bridge_type=ovs
# The vlans for each of the bridges if that is defined.
vlans=()
vlans+=(300)
vlans+=(301)
vlans+=(302)
vlans+=(303)
vlans+=(304)
vlans+=(305)
vlans+=(1)
# The bridges by default that will be used
bridges=()
bridges+=("br0")
bridges+=("br1")
bridges+=("br2")
bridges+=("br3")
bridges+=("br4")
bridges+=("br5")
bridges+=("br6")
# The subnets
subnets=()
subnets+=("10.0.1")
subnets+=("10.0.2")
subnets+=("10.0.3")
subnets+=("10.0.4")
subnets+=("10.0.5")
subnets+=("10.0.6")
subnets+=("192.168.1")

View File

@@ -1,30 +0,0 @@
hypervisor_name=hp01
hypervisor_ip=10.0.1.251
hypervisor_mac="e0:07:1b:ff:27:48"
hypervisor_tag="hp"
ip_suffix=251
storage_path="/var/lib/vz/maas"
ceph_storage_path="/var/lib/libvirt/maas"
external_ip=192.168.1.200
compute="hp1-maas-node"
enable_uefi="true"
juju_count=0
control_count=0
enable_tagging=false
bridges=()
bridges+=("br0")
bridges+=("br1")
bridges+=("br2")
bridges+=("br3")
bridges+=("br4")
bridges+=("br5")
bridges+=("lxdbr0")

View File

@@ -1,29 +0,0 @@
juju_version="3/stable"
juju_ha="true"
juju_bootstrap_series="jammy"
launchpad_user="arif-ali"
package_repository="http://192.168.1.12/archive.ubuntu.com/ubuntu/"
maas_bridge_ip="10.0.1.253"
maas_boot_source="http://192.168.1.12/maas/images/ephemeral-v3/stable/"
maas_endpoint="http://$maas_bridge_ip:5240/MAAS"
maas_kernel_opts="console=ttyS0,115200 console=tty0,115200 elevator=noop intel_iommu=on iommu=pt debug nosplash scsi_mod.use_blk_mq=1 dm_mod.use_blk_mq=1 enable_mtrr_cleanup mtrr_spare_reg_nr=1 systemd.log_level=info"
maas_ip_range="10.0.1"
maas_pass="openstack"
maas_profile="admin"
maas_system_ip="192.168.1.22"
maas_upstream_dns="192.168.1.13"
maas_user="admin"
maas_version="3.4"
#no_proxy="localhost,127.0.0.1,$maas_system_ip"
#squid_proxy="http://192.168.1.23:3128"
maas_pkg_type="snap"
maas_api_key="dSPcjd62cqS8USHE8q:SYjwagWHgUFvPwVBBx:mq8mqau4fWYZ2jtJPjk8Lj3W8Mtr7ZuJ"
maas_subnet_start="10.0.1.1"
maas_subnet_end="10.0.1.99"
maas_dhcp_start_postfix="1"
maas_dhcp_end_postfix="99"
maas_spaces=(oam ceph-access ceph-replica overlay admin internal external)
maas_subnets=(10.0.1 10.0.2 10.0.3 10.0.4 10.0.5 10.0.6 192.168.1 )
maas_vlans=(300 301 302 303 304 305 1)
maas_dns_names=(aodh cinder dashboard glance heat keystone mysql neutron nova gnocchi contrail vault placement)
maas_dns_ips=(211 212 213 214 215 216 217 218 219 220 221 222 223)

21
default.config Normal file
View File

@@ -0,0 +1,21 @@
storage_path="/storage/images/maas"
compute="maas-node"
control_count=0
node_count=20
node_start=1
node_cpus=4
node_ram=4096
disks=()
disks+=(50)
disks+=(100)
disks+=(100)
network_type=network
networks=()
networks+=("maas")
networks+=("maas")

View File

@@ -1,22 +0,0 @@
qemu_connection="qemu+ssh://virsh@10.0.1.253/system"
qemu_password="seBGtkWFKZuFUFgig8NYU5uh"
storage_path="/var/lib/libvirt/maas-images"
compute="maas-node"
control_count=1
control_cpus=3
control_ram=8192
node_start=1
node_cpus=2
node_ram=4096
network_type=bridge
bridges=()
bridges+=("br0")
bridges+=("br1")
bridges+=("br2")
bridges+=("br3")
bridges+=("br4")

View File

@@ -1,20 +0,0 @@
qemu_connection="qemu+ssh://virsh@10.0.1.253/system"
qemu_password="seBGtkWFKZuFUFgig8NYU5uh"
storage_path="/var/lib/libvirt/maas-images"
compute="maas-node"
control_count=1
control_cpus=3
control_ram=8192
node_count=11
node_start=1
node_cpus=2
node_ram=4096
network_type=network
networks=()
networks+=("maas")
networks+=("maas")

View File

@@ -1,253 +0,0 @@
#!/bin/bash
# how long you want to wait for commissioning
# default is 1200, i.e. 20 mins
state_timeout=1200
# Install the packages these scripts depend on, plus the maas snap which
# provides the CLI.
# Globals: maas_version (read) — snap channel track to install from
install_deps()
{
# Install some of the dependent packages
deps="jq"
# virt-install is only needed by the VM-management entry point
if [[ "$0" =~ "manage-vm-nodes" ]] ; then
deps+=" virtinst"
fi
sudo apt -y update && sudo apt -y install ${deps}
# We install the snap, as maas-cli is not in distributions, this ensures
# that the package we invoke would be consistent
sudo snap install maas --channel=${maas_version}/stable
}
# Remove the snaps installed for/by the MAAS bootstrap.
# NOTE(review): this also removes lxd — confirm nothing else on the host
# depends on it before calling.
pkg_cleanup()
{
sudo snap remove maas maas-cli lxd
}
# Log in to MAAS with the API key, creating/refreshing the CLI profile.
# Globals: maas_api_key, maas_profile, maas_endpoint (all read)
# The output is captured (discarded) so the login stays quiet on success.
maas_login()
{
# Login to MAAS using the API key and the endpoint
login=$(echo ${maas_api_key} | maas login ${maas_profile} ${maas_endpoint} -)
}
# Resolve a machine's human-readable hostname to its MAAS system_id.
# Globals:   maas_profile (read)
# Arguments: $1 - hostname as known to MAAS
# Outputs:   the system_id on stdout (empty when no machine matches)
maas_system_id()
{
    node_name="$1"
    maas "${maas_profile}" machines read hostname="${node_name}" \
        | jq -r '.[].system_id'
}
# Look up the MAAS pod id registered under the given node name, if any.
# Globals:   maas_profile (read)
# Arguments: $1 - pod/hypervisor name
# Outputs:   the pod id on stdout (empty when no pod matches)
maas_pod_id()
{
    node_name="$1"
    maas "${maas_profile}" pods read \
        | jq -c '.[] | {pod_id:.id, hyp_name:.name}' \
        | grep "${node_name}" \
        | jq -r '.pod_id'
}
# Attach a tag to a machine, creating the tag first if it does not exist.
# Globals:   maas_profile (read), enable_tagging (read, optional opt-out)
# Arguments: $1 - machine system_id
#            $2 - tag name
# No-op when enable_tagging is explicitly set to "false".
machine_add_tag()
{
    system_id=$1
    tag=$2
    [[ -n "$enable_tagging" ]] && [[ $enable_tagging == "false" ]] && return
    # If the tag doesn't exist, then create it.  Depending on the MAAS
    # version a missing tag reads back as "Not Found" or as the error text
    # below.
    err='No Tag matches the given query.'
    read_tag=$(maas ${maas_profile} tag read ${tag})
    # BUG FIX: the original read `${err}]]` with no space before `]]`,
    # which is a bash syntax error; also quote the comparison so the
    # multi-word error message is matched literally.
    if [[ ${read_tag} == "Not Found" ]] || [[ ${read_tag} == "${err}" ]]; then
        case $tag in
            "pod-console-logging")
                # VMs get a serial console so MAAS can capture boot output
                kernel_opts="console=tty1 console=ttyS0"
                ;;
            *)
                kernel_opts=""
                ;;
        esac
        tag_create=$(maas ${maas_profile} tags create name=${tag} kernel_opts="${kernel_opts}")
    fi
    # Assign the tag to the machine
    tag_update=$(maas ${maas_profile} tag update-nodes ${tag} add=${system_id})
}
# Place a machine into an availability zone, creating the zone on first use.
# Globals:   maas_profile (read)
# Arguments: $1 - machine system_id
#            $2 - zone name
machine_set_zone()
{
    system_id="$1"
    zone="$2"
    # Create the zone if MAAS does not know it yet
    if [[ "$(maas ${maas_profile} zone read ${zone})" == "Not Found" ]] ; then
        zone_create=$(maas ${maas_profile} zones create name=${zone})
    fi
    zone_set=$(maas ${maas_profile} machine update ${system_id} zone=${zone})
}
# Block until the machine reaches the requested state, or state_timeout
# (seconds, default 1200) elapses — whichever comes first.
# Globals:   maas_profile, state_timeout (read)
# Arguments: $1 - machine system_id
#            $2 - target status_name (e.g. "Ready", "Deployed")
ensure_machine_in_state()
{
    system_id="$1"
    state="$2"
    # TODO: accept an optional $3 to override state_timeout per call
    timeout="${state_timeout}"
    # Epoch seconds when the wait started; time_end is refreshed each poll
    time_start=$(date +%s)
    time_end=${time_start}
    # Current state of the system as MAAS reports it
    status_name=$(maas ${maas_profile} machine read ${system_id} | jq -r '.status_name')
    # Poll every 20 seconds until the target state is reached or we time out
    while [[ "${status_name}" != "${state}" ]] && (( time_end - time_start <= timeout ))
    do
        sleep 20
        status_name=$(maas ${maas_profile} machine read ${system_id} | jq -r '.status_name')
        time_end=$(date +%s)
    done
}
# Register a node (VM or physical) in MAAS, commission it, and apply
# networking, tags, zone and partitioning.
# Globals:   maas_profile, virsh_user, hypervisor_ip, qemu_password,
#            hypervisor_name (all read)
# Arguments: $1 - node hostname
#            $2 - MAC address of the boot interface
#            $3 - node type tag ("physical" for bare metal, anything else => VM)
maas_add_node()
{
node_name=$1
mac_addr=$2
node_type=$3
machine_type="vm"
[[ $node_type == "physical" ]] && machine_type="$node_type"
# VMs are power-managed over virsh; physical boxes are powered manually
if [[ $machine_type == "vm" ]] ; then
power_type="virsh"
power_params="power_parameters_power_id=${node_name}"
power_params+=" power_parameters_power_address=qemu+ssh://${virsh_user}@${hypervisor_ip}/system"
power_params+=" power_parameters_power_pass=${qemu_password}"
else
power_type="manual"
power_params=""
fi
# Check if the system already exists
system_id=$(maas_system_id ${node_name})
# This command creates the machine in MAAS, if it doesn't already exist.
# This will then automatically turn the machines on, and start
# commissioning.
if [[ -z "$system_id" ]] ; then
machine_create=$(maas ${maas_profile} machines create \
hostname=${node_name} \
mac_addresses=${mac_addr} \
architecture=amd64/generic \
power_type=${power_type} ${power_params})
system_id=$(echo $machine_create | jq -r .system_id)
ensure_machine_in_state ${system_id} "Ready"
maas_assign_networks ${system_id}
else
# The machine exists: make sure its boot interface carries the MAC we
# were given, then re-commission with the refreshed power settings.
boot_int=$(maas ${maas_profile} machine read ${system_id} | jq ".boot_interface | {mac:.mac_address, int_id:.id}")
if [[ $mac_addr != "$(echo $boot_int | jq .mac | sed s/\"//g)" ]] ; then
# A quick hack so that we can change the mac address of the interface.
# The machine needs to be broken, ready or allocated.
hack_commission=$(maas $maas_profile machine commission ${system_id})
hack_break=$(maas $maas_profile machine mark-broken ${system_id})
int_update=$(maas $maas_profile interface update ${system_id} $(echo $boot_int | jq -r .int_id) mac_address=${mac_addr})
fi
machine_power_update=$(maas ${maas_profile} machine update ${system_id} \
power_type=${power_type} ${power_params})
commission_node ${system_id}
fi
# Tag with the node type, pin to the hypervisor's zone, and lay out disks
machine_add_tag ${system_id} ${node_type}
machine_set_zone ${system_id} ${hypervisor_name}
[[ $machine_type == "vm" ]] && machine_add_tag ${system_id} "pod-console-logging"
maas_create_partitions ${system_id}
}
# Ensure the parent domain exists, then look up the DNS resource matching
# the given record name.
# Globals:   maas_profile (read)
# Arguments: $1 - record name
#            $2 - domain name
#            $3 - IP address (currently unused — see NOTE below)
# Outputs:   {fqdn, ip} JSON lines for the matching record(s)
add_dns_record()
{
    record=$1
    domain=$2
    ip_addr=$3
    # Make sure the parent domain exists (created when missing)
    domain_entry=$(add_domain $domain)
    domain_id=$(echo $domain_entry | jq .id)
    # BUG FIX: the jq filter was hardcoded to "landscape-internal" and the
    # profile was hardcoded to "admin"; use the requested record name and
    # the configured profile instead.
    maas ${maas_profile} dnsresources read | jq -rc --arg record "${record}" '.[] | select(.fqdn | contains($record)) | {fqdn:.fqdn,ip:.ip_addresses[].ip}'
    # NOTE(review): this only reads existing records — nothing is created
    # with $ip_addr / $domain_id yet; a dnsresources create call appears to
    # be still TODO.
}
# Fetch a MAAS DNS domain by name, creating it (authoritative) when absent.
# Globals:   maas_profile (read)
# Arguments: $1 - domain name
# Outputs:   the domain's JSON entry on stdout
add_domain()
{
    domain=$1
    # Look for an existing domain with exactly this name
    domain_entry=$(maas ${maas_profile} domains read | jq -rc --arg domainname "${domain}" '.[] | select(.name == $domainname)')
    # BUG FIX: the guard previously tested $domain_exists, a variable that
    # is never assigned anywhere, so a missing domain was never created;
    # test the lookup result instead.
    [[ -z $domain_entry ]] && domain_entry=$(maas ${maas_profile} domains create name="${domain}" authoritative=true)
    echo $domain_entry
}
# Commission (or re-commission) a machine and wait for it to become Ready,
# then re-apply the expected network layout.
# Globals:   maas_profile (read)
# Arguments: $1 - machine system_id
commission_node()
{
    system_id="$1"
    # Kick off commissioning; the CLI response is deliberately discarded
    commission_machine=$(maas ${maas_profile} machine commission ${system_id})
    # Block until commissioning finishes
    ensure_machine_in_state ${system_id} "Ready"
    maas_assign_networks ${system_id}
}
# Source the stack of config files (defaults, site config, and — for the
# VM-management script only — the hypervisor config), then derive node_count
# from the host's available memory.
read_configs()
{
configs=""
configs+=" configs/default.config"
configs+=" configs/maas.config"
if [[ "$0" =~ "manage-vm-nodes" ]] ; then
configs+=" configs/hypervisor.config"
fi
for config in $configs ; do
read_config $config
done
# Dynamically generate the node count
# The amount of memory add on 10% then divide by node_ram then add 1
# For a 32GB machine we'll get 10 VMs altogether
# 1 x 4GB juju, 1 x 8GB controller, 8 x 4GB compute
# The juju VM is not included in the count
# NOTE(review): the trailing (7*7/10) is integer math and evaluates to 4 —
# presumably a fudge factor for overcommit; confirm the intended formula.
node_count=$(( (( `cat /proc/meminfo | grep -i memtotal | awk '{print $2}'` - ( ${control_count} * ${control_ram} * 1024 )) * 11 / 10) / 1024 / ${node_ram} + (7*7/10) ))
}
# Source a single shell-style config file into the current shell.
# Arguments: $1 - path to the config file
# Exits:     1 (with a message on stderr) when the file does not exist
read_config()
{
    config=$1
    # Quote the path so names with spaces don't break the test, and pass the
    # filename as printf data rather than interpolating it into the format
    # string (a '%' in the path would otherwise corrupt the message).
    if [[ ! -f "$config" ]]; then
        printf "Error: missing config file. Please create the file '%s'.\n" "$config" >&2
        exit 1
    fi
    # extglob is enabled for patterns used inside the config files
    shopt -s extglob
    source "$config"
}

32
hp01.config Normal file
View File

@@ -0,0 +1,32 @@
# hp01.config — per-hypervisor overrides for the hp01 host.

# Management address and boot NIC MAC of the hypervisor itself
hypervisor_ip=10.0.1.252
hypervisor_mac="e0:07:1b:ff:27:48"
# libvirt endpoint MAAS uses to power-manage VMs on this host
qemu_connection="qemu+ssh://virsh@${hypervisor_ip}/system"
# NOTE(review): clear-text password committed to the repo — rotate it and
# keep this file out of version control.
qemu_password="5b5Bnev4kh3QeDGQ4KK8bVtX"
storage_path="/var/lib/vz/maas"
# Hostname prefix for VMs created on this hypervisor
compute="hp1-maas-node"
control_count=1
control_cpus=3
control_ram=8192
node_count=7
node_start=1
node_cpus=2
node_ram=4096
# Per-VM disk sizes in GB
disks=()
disks+=(50)
disks+=(20)
disks+=(20)
# VMs attach straight to host bridges, one NIC per bridge below
network_type=bridge
bridges=()
bridges+=("br0")
bridges+=("br1")
bridges+=("br2")
bridges+=("br3")
bridges+=("br4")

1
hypervisor.config Symbolic link
View File

@@ -0,0 +1 @@
asrock01.config

View File

@@ -1,246 +1,205 @@
#!/bin/bash
# set -x
. functions.sh
. default.config
. maas.config
. hypervisor.config
# how long you want to wait for commissioning
# default is 1200, i.e. 20 mins
commission_timeout=1200
# Time between building VMs
build_fanout=60
# Adds all the subnets, vlans and therefore bridges to the hypervisor, all
# based on the configuration from hypervisor.config and/or default.config
# Ensures that any dependent packages are installed for any MAAS CLI commands
# This also logs in to MAAS, and sets up the admin profile
maas_login()
{
# Install some of the dependent packages
sudo apt -y update && sudo apt -y install jq bc virtinst
# We install the snap, as maas-cli is not in distributions, this ensures
# that the package we invoke would be consistent
sudo snap install maas --channel=2.8/stable
# Login to MAAS using the API key and the endpoint
echo ${maas_api_key} | maas login ${maas_profile} ${maas_endpoint} -
}
# Grabs the unique system_id for the host human readable hostname
maas_system_id()
{
node_name=$1
maas ${maas_profile} machines read hostname=${node_name} | jq ".[].system_id" | sed s/\"//g
}
maas_pod_id()
{
node_name=$1
maas ${maas_profile} pods read | jq ".[] | {pod_id:.id, hyp_name:.name}" --compact-output | \
grep ${node_name} | jq ".pod_id" | sed s/\"//g
}
# Adds the VM into MAAS
maas_add_node()
{
node_name=$1
mac_addr=$2
node_type=$3
# This command creates the machine in MAAS. This will then automatically
# turn the machines on, and start commissioning.
maas ${maas_profile} machines create \
hostname=${node_name} \
mac_addresses=${mac_addr} \
architecture=amd64/generic \
power_type=manual
# Grabs the system_id for th node that we are adding
system_id=$(maas_system_id ${node_name})
# This will ensure that the node is ready before we start manipulating
# other attributes.
ensure_machine_ready ${system_id}
# If the tag doesn't exist, then create it
if [[ $(maas ${maas_profile} tag read ${node_type}) == "Not Found" ]] ; then
maas ${maas_profile} tags create name=${node_type}
fi
# Assign the tag to the machine
maas ${maas_profile} tag update-nodes ${node_type} add=${system_id}
# Ensure that all the networks on the system have the Auto-Assign set
# so that the all the of the networks on the host have an IP automatically.
maas_assign_networks ${system_id}
}
# Attempts to auto assign all the networks for a host
maas_assign_networks()
{
system_id=$1
# Get the details of the physical interface
phsy_int=$(maas ${maas_profile} interfaces read ${system_id} | jq -c ".[] | {id:.id, name:.name, mac:.mac_address, parent:.parents}" | grep "${hypervisor_mac}.*parent.*\[\]")
phys_int_name=$(echo $phsy_int | jq -r .name)
phys_int_id=$(echo $phsy_int | jq -r .id)
phsy_int=$(maas ${maas_profile} interfaces read ${system_id} | jq ".[] | {id:.id, name:.name,parent:.parents}" --compact-output | grep "parent.*\[\]")
phys_int_name=$(echo $phsy_int | jq .name | sed s/\"//g)
phys_int_id=$(echo $phsy_int | jq .id | sed s/\"//g)
i=0
for vlan in ${vlans[*]}
do
subnet_line=$(maas admin subnets read | jq -rc --arg vlan "$vlan" ".[] | select(.vlan.vid == $vlan) | select(.name | contains(\"/24\"))| {subnet_id:.id, vlan_id:.vlan.id, cidr: .cidr}")
maas_vlan_id=$(echo $subnet_line | jq -r .vlan_id)
maas_subnet_id=$(echo $subnet_line | jq -r .subnet_id)
sub_prefix=$(echo $subnet_line | jq -r .cidr | sed 's/0\/24//g')
ip_addr=""
subnet_line=$(maas admin subnets read | jq ".[] | {subnet_id:.id, vlan:.vlan.vid, vlan_id:.vlan.id}" --compact-output | grep "vlan\":$vlan,")
maas_vlan_id=$(echo $subnet_line | jq .vlan_id | sed s/\"//g)
maas_subnet_id=$(echo $subnet_line | jq .subnet_id | sed s/\"//g)
if [[ $i -eq 0 ]] ; then
# Set the first interface to be static as per the configuration so that it
# consistent over re-provisioning of the system
vlan_int_id=${phys_int_id}
mode="STATIC"
ip_addr="ip_address=$hypervisor_ip"
else
# Check to see if the vlan interface already exists, otherwise create it
vlan_int_id=$(maas ${maas_profile} interfaces read ${system_id} | jq --argjson vlan ${vlan} '.[] | select(.vlan.vid == $vlan) | select(.type == "vlan") | .id')
if [[ -z "$vlan_int_id" ]] ; then
vlan_int=$(maas ${maas_profile} interfaces create-vlan ${system_id} vlan=${maas_vlan_id} parent=$phys_int_id)
vlan_int_id=$(echo $vlan_int | jq -r .id)
fi
vlan_int=$(maas ${maas_profile} interfaces create-vlan ${system_id} vlan=${maas_vlan_id} parent=$phys_int_id)
vlan_int_id=$(echo $vlan_int | jq .id | sed s/\"//g)
if [[ $vlan -eq $external_vlan ]] ; then
# Set the external IP to be static as per the configuration
mode="STATIC"
ip_addr="ip_address=$external_ip"
mode="DHCP"
else
# Set everything else to be auto assigned
mode="STATIC"
ip_addr="ip_address=${sub_prefix}${ip_suffix}"
mode="AUTO"
fi
ip_addr=""
fi
# Check to see if the bridge interface already exists, otherwise create it
bridge_int=$(maas ${maas_profile} interfaces read ${system_id} | jq --argjson vlan ${vlan} '.[] | select(.vlan.vid == $vlan) | select(.type == "bridge")')
[[ -z "${bridge_int}" ]] && bridge_int=$(maas ${maas_profile} interfaces create-bridge ${system_id} name=${bridges[$i]} vlan=$maas_vlan_id mac_address=${hypervisor_mac} parent=$vlan_int_id bridge_type=${bridge_type})
bridge_int_id=$(echo $bridge_int | jq -r .id)
cur_mode=$(echo $bridge_int | jq -r ".links[].mode")
# If the mode is already set correctly, then move on
[[ $cur_mode == "auto" ]] && [[ $mode == "AUTO" ]] && continue
#bridge_unlink=$(maas ${maas_profile} interface unlink-subnet $system_id $bridge_int_id id=$( echo $bridge_int_id | jq {maas_subnet_id})
bridge_int=$(maas ${maas_profile} interfaces create-bridge ${system_id} name=${bridges[$i]} vlan=$maas_vlan_id mac_address=${hypervisor_mac} parent=$vlan_int_id)
bridge_int_id=$(echo $bridge_int | jq .id | sed s/\"//g)
bridge_link=$(maas ${maas_profile} interface link-subnet $system_id $bridge_int_id mode=${mode} subnet=${maas_subnet_id} ${ip_addr})
echo $bridge_link
(( i++ ))
done
}
maas_create_partitions()
# This takes the system_id, and ensures that the machine is uin Ready state
# You may want to tweak the commission_timeout above in somehow it's failing
# and needs to be done quicker
ensure_machine_ready()
{
system_id=$1
disks=$(maas ${maas_profile} block-devices read ${system_id})
size=20
actual_size=$(( $size * 1024 * 1024 * 1024 ))
boot_disk=$(echo $disks | jq ".[] | select(.name == \"${disk_names[0]}\") | .id")
set_boot_disk=$(maas ${maas_profile} block-device set-boot-disk ${system_id} ${boot_disk})
storage_layout=$(maas ${maas_profile} machine set-storage-layout ${system_id} storage_layout=lvm vg_name=${hypervisor_name} lv_name=root lv_size=${actual_size} root_disk=${boot_disk})
vg_device=$(echo $storage_layout | jq ".volume_groups[].id" )
remaining_space=$(maas ${maas_profile} volume-group read ${system_id} ${vg_device} | jq -r ".available_size")
libvirt_lv=$(maas ${maas_profile} volume-group create-logical-volume ${system_id} ${vg_device} name=libvirt size=${remaining_space})
libvirt_block_id=$(echo ${libvirt_lv} | jq -r .id)
stg_fs=$(maas ${maas_profile} block-device format ${system_id} ${libvirt_block_id} fstype=ext4)
stg_mount=$(maas ${maas_profile} block-device mount ${system_id} ${libvirt_block_id} mount_point=${ceph_storage_path})
for ((disk=1;disk<${#disk_names[@]};disk++)); do
disk_id=$(echo $disks | jq -r ".[] | select(.name == \"${disk_names[$disk]}\") | .id")
create_partition=$(maas ${maas_profile} partitions create ${system_id} ${disk_id})
part_id=$(echo $create_partition | jq -r .id)
if [[ $disk -eq 1 ]] ; then
vg_create=$(maas ${maas_profile} volume-groups create ${system_id} name=${hypervisor_name}-nvme block_device=${disk_id} partitions=${part_id})
vg_id=$(echo $vg_create | jq -r .id)
vg_size=$(echo $vg_create | jq -r .size)
else
vg_update=$(maas ${maas_profile} volume-group update ${system_id} ${vg_id} add_partitions=${part_id})
vg_size=$(echo $vg_update | jq -r .size)
fi
time_start=$(date +%s)
time_end=${time_start}
status_name=$(maas ${maas_profile} machine read ${system_id} | jq ".status_name" | sed s/\"//g)
while [[ ${status_name} != "Ready" ]] && [[ $( echo ${time_end} - ${time_start} | bc ) -le ${commission_timeout} ]]
do
sleep 20
status_name=$(maas ${maas_profile} machine read ${system_id} | jq ".status_name" | sed s/\"//g)
time_end=$(date +%s)
done
lv_create=$(maas admin volume-group create-logical-volume ${system_id} ${vg_id} name=images size=${vg_size})
lv_id=$(echo $lv_create | jq -r .id)
lv_fs=$(maas ${maas_profile} block-device format ${system_id} ${lv_id} fstype=ext4)
lv_mount=$(maas ${maas_profile} block-device mount ${system_id} ${lv_id} mount_point=${storage_path})
}
# Register the hypervisor as a virsh pod in MAAS and name it after the host.
# Globals: maas_profile, virsh_user, hypervisor_ip, qemu_password,
#          hypervisor_name (all read)
maas_add_pod()
{
pod_create=$(maas ${maas_profile} pods create power_address="qemu+ssh://${virsh_user}@${hypervisor_ip}/system" power_user="${virsh_user}" power_pass="${qemu_password}" type="virsh")
pod_id=$(echo $pod_create | jq -r .id)
pod_name=$(maas ${maas_profile} pod update ${pod_id} name=${hypervisor_name})
}
# Destroy the hypervisor's pod and machine records in MAAS
wipe_node() {
install_deps
maas_login
destroy_node
}
# Register the hypervisor itself as a physical machine in MAAS
create_node() {
install_deps
maas_login
maas_add_node ${hypervisor_name} ${hypervisor_mac} physical
}
# Deploy the hypervisor, then register it as a virsh pod
install_node() {
install_deps
maas_login
deploy_node
maas_add_pod
}
# Register the (already deployed) hypervisor as a virsh pod only
add_pod()
{
install_deps
maas_login
maas_add_pod
}
# Re-apply the VLAN/bridge network layout on the hypervisor
network_auto()
{
install_deps
maas_login
system_id=$(maas_system_id ${hypervisor_name})
maas_assign_networks ${system_id}
}
# Re-apply the disk/LVM partition layout on the hypervisor
create_partitions()
{
install_deps
maas_login
system_id=$(maas_system_id ${hypervisor_name})
maas_create_partitions ${system_id}
}
# Delete the hypervisor's pod and machine records from MAAS.
# NOTE(review): each delete appears twice (captured and bare) — this looks
# like old and new lines interleaved from a diff; one of each pair should go.
destroy_node() {
pod_id=$(maas_pod_id ${hypervisor_name})
pod_delete=$(maas ${maas_profile} pod delete ${pod_id})
maas ${maas_profile} pod delete ${pod_id}
system_id=$(maas_system_id ${hypervisor_name})
machine_delete=$(maas ${maas_profile} machine delete ${system_id})
maas ${maas_profile} machine delete ${system_id}
}
# Deploy the hypervisor with the cloud-init user data and wait for it.
# NOTE(review): machine deploy is issued twice (before and after the wait) —
# this looks like old and new diff lines interleaved; the second call is
# presumably stale and should be removed.
deploy_node() {
system_id=$(maas_system_id ${hypervisor_name})
maas ${maas_profile} machine deploy ${system_id} user_data="$(base64 user-data.yaml)" > /dev/null
#maas ${maas_profile} machine deploy ${system_id} install_kvm=true user_data="$(base64 user-data.yaml)"
# Only return when the node has finished deploying
ensure_machine_in_state ${system_id} "Deployed"
maas ${maas_profile} machine deploy ${system_id} user_data="$(base64 user-data.yaml)"
# TODO: keep trying, until it gives a valid output
#until $(maas ${maas_profile} machine deploy ${system_id} install_kvm=true) ; do
# machine ${maas_profile} machine release ${system_id}
}
show_help() {
echo "
-a <node> Create and Deploy
-c <node> Creates Hypervisor
-d <node> Deploy Hypervisor
-k <node> Add Hypervisor as Pod
-n <node> Assign Networks
-p <node> Update Partitioning
-w <node> Removes Hypervisor
-c Creates Hypervisor
-w Removes Hypervisor
-i Install/Deploy Hypervisor
-a Create and Deploy
"
}
read_configs
while getopts ":c:w:d:a:k:n:p:" opt; do
while getopts ":cwdi" opt; do
case $opt in
c)
read_config "configs/$OPTARG.config"
create_node
;;
w)
read_config "configs/$OPTARG.config"
wipe_node
;;
d)
read_config "configs/$OPTARG.config"
i)
install_node
;;
a)
read_config "configs/$OPTARG.config"
create_node
install_node
;;
k)
read_config "configs/$OPTARG.config"
add_pod
;;
n)
read_config "configs/$OPTARG.config"
network_auto
;;
p)
read_config "configs/$OPTARG.config"
create_partitions
;;
\?)
printf "Unrecognized option: -%s. Valid options are:" "$OPTARG" >&2
show_help
exit 1
;;
: )
printf "Option -%s needs an argument.\n" "$OPTARG" >&2
show_help
echo ""
exit 1
esac
done

321
manage-maas-nodes.sh Executable file
View File

@@ -0,0 +1,321 @@
#!/bin/bash
# manage-maas-nodes.sh — create, register, wipe and destroy the libvirt VMs
# that MAAS manages.  Configuration comes from the sourced files below.
# set -x
. default.config
. maas.config
. hypervisor.config
# Storage type (format passed to qemu-img / virt-install)
storage_format="raw"
# Models for nic and storage
nic_model="virtio"
stg_bus="scsi"
# how long you want to wait for commissioning
# default is 1200, i.e. 20 mins
commission_timeout=1200
# Time between building VMs (seconds) — staggers DHCP/commissioning load
build_fanout=60
# Install dependent packages and the maas CLI snap, then log in to MAAS.
# Globals: maas_api_key, maas_profile, maas_endpoint (read)
#          maas_version (read, optional) — snap channel track
maas_login()
{
    # Install some of the dependent packages
    sudo apt -y update && sudo apt -y install jq bc virtinst
    # We install the snap, as maas-cli is not in distributions, this ensures
    # that the package we invoke would be consistent.
    # CONSISTENCY FIX: use the maas_version from the sourced maas.config
    # (as functions.sh does) instead of a hardcoded 2.8; fall back to 2.8
    # when maas_version is unset, preserving the old behaviour.
    sudo snap install maas --channel=${maas_version:-2.8}/stable
    # Login to MAAS using the API key and the endpoint
    echo ${maas_api_key} | maas login ${maas_profile} ${maas_endpoint} -
}
# Resolve a machine's human-readable hostname to its MAAS system_id.
# Globals:   maas_profile (read)
# Arguments: $1 - hostname as known to MAAS
# Outputs:   the system_id on stdout (empty when no machine matches)
maas_system_id()
{
    node_name=$1
    # jq -r emits the raw string directly, replacing the fragile
    # `jq | sed s/\"//g` pipeline (and matching the functions.sh version)
    maas ${maas_profile} machines read hostname=${node_name} | jq -r ".[].system_id"
}
# Create a VM's machine record in MAAS (virsh power control), wait for
# commissioning, tag it, and enable auto-assign on its networks.
# Globals:   maas_profile, qemu_connection, qemu_password (read)
# Arguments: $1 - node hostname
#            $2 - MAC address of the boot interface
#            $3 - node type, used as a tag (e.g. "compute", "control")
maas_add_node()
{
node_name=$1
mac_addr=$2
node_type=$3
# This command creates the machine in MAAS. This will then automatically
# turn the machines on, and start commissioning.
maas ${maas_profile} machines create \
hostname=${node_name} \
mac_addresses=${mac_addr} \
architecture=amd64/generic \
power_type=virsh \
power_parameters_power_id=${node_name} \
power_parameters_power_address=${qemu_connection} \
power_parameters_power_pass=${qemu_password}
# Grabs the system_id for the node that we are adding
system_id=$(maas_system_id ${node_name})
# This will ensure that the node is ready before we start manipulating
# other attributes.
ensure_machine_ready ${system_id}
# If the tag doesn't exist, then create it
if [[ $(maas ${maas_profile} tag read ${node_type}) == "Not Found" ]] ; then
maas ${maas_profile} tags create name=${node_type}
fi
# Assign the tag to the machine
maas ${maas_profile} tag update-nodes ${node_type} add=${system_id}
# Ensure that all the networks on the system have the Auto-Assign set
# so that the all the of the networks on the host have an IP automatically.
maas_auto_assign_networks ${system_id}
}
# Switch every interface link on a machine that is not already "auto" to
# AUTO assignment, so each network gets an IP at deploy time.
# Globals:   maas_profile (read)
# Arguments: $1 - machine system_id
maas_auto_assign_networks()
{
system_id=$1
# Grabs all the interfaces that are attached to the system
node_interfaces=$(maas ${maas_profile} interfaces read ${system_id} \
| jq ".[] | {id:.id, name:.name, mode:.links[].mode, subnet:.links[].subnet.id }" --compact-output)
# This for loop will go through all the interfaces and enable Auto-Assign
# on all ports.
# NOTE(review): the unquoted expansion relies on jq's compact output
# containing no whitespace inside each object — it word-splits one object
# per loop iteration; verify this holds for all field values.
for interface in ${node_interfaces}
do
int_id=$(echo $interface | jq ".id" | sed s/\"//g)
subnet_id=$(echo $interface | jq ".subnet" | sed s/\"//g)
mode=$(echo $interface | jq ".mode" | sed s/\"//g)
if [[ $mode != "auto" ]] ; then
maas ${maas_profile} interface link-subnet ${system_id} ${int_id} mode="AUTO" subnet=${subnet_id}
fi
done
}
# End-to-end VM creation: log in to MAAS, create the disk images, then
# build and register the VMs.
create_vms() {
maas_login
create_storage
build_vms
}
# Block until the machine reaches "Ready" state, or commission_timeout
# (seconds, default 1200) elapses — whichever comes first.
# You may want to tweak commission_timeout above if commissioning is slow.
# Globals:   maas_profile, commission_timeout (read)
# Arguments: $1 - machine system_id
ensure_machine_ready()
{
    system_id=$1
    time_start=$(date +%s)
    time_end=${time_start}
    # jq -r replaces the old `jq | sed s/\"//g` pipeline
    status_name=$(maas ${maas_profile} machine read ${system_id} | jq -r ".status_name")
    # Poll every 20 seconds; shell arithmetic replaces the old
    # `echo ... | bc` pipeline, dropping the external bc dependency.
    while [[ ${status_name} != "Ready" ]] && [[ $(( time_end - time_start )) -le ${commission_timeout} ]]
    do
        sleep 20
        status_name=$(maas ${maas_profile} machine read ${system_id} | jq -r ".status_name")
        time_end=$(date +%s)
    done
}
# Log in to MAAS and tear down every managed VM (see destroy_vms).
wipe_vms() {
maas_login
destroy_vms
}
# Create the backing disk images for every node, in parallel.
# Globals: node_start, node_count, compute, storage_path, storage_format,
#          disks[] (all read)
create_storage() {
for ((virt="$node_start"; virt<=node_count; virt++)); do
printf -v virt_node %s-%02d "$compute" "$virt"
# Create the directory where the storage files will be located
mkdir -p "$storage_path/$virt_node"
# For all the disks that are defined in the array, create a disk
# (backgrounded; the wait below is the barrier)
for ((disk=0;disk<${#disks[@]};disk++)); do
/usr/bin/qemu-img create -f "$storage_format" \
"$storage_path/$virt_node/$virt_node-d$((${disk} + 1)).img" "${disks[$disk]}"G &
done
done
wait
}
# Release every node in MAAS and recreate its disk images from scratch,
# reclaiming space while keeping the machine records reusable.
# Globals: node_start, node_count, compute, storage_path, disks[] (read)
wipe_disks() {
for ((virt="$node_start"; virt<=node_count; virt++)); do
printf -v virt_node %s-%02d "$compute" "$virt"
system_id=$(maas_system_id ${virt_node})
# Release the machine in MAAS
maas ${maas_profile} machine release ${system_id}
# Ensure that the machine is in ready state before the next step
ensure_machine_ready ${system_id}
# Stop the machine if it is running
virsh --connect qemu:///system shutdown "$virt_node"
# Remove the disks (backgrounded; see wait below)
for ((disk=0;disk<${#disks[@]};disk++)); do
rm -rf "$storage_path/$virt_node/$virt_node-d$((${disk} + 1)).img" &
done
done
# Re-create the storage again from scratch
create_storage
wait
}
# Define, start, and register every VM in MAAS, pacing creation by
# build_fanout seconds to avoid a thundering herd of DHCP/commissioning.
# Globals: node_start, node_count, compute, control_count, control_ram,
#          control_cpus, node_ram, node_cpus, stg_bus, network_type,
#          bridges[]/networks[], nic_model, disks[], storage_path,
#          storage_format, build_fanout (all read)
build_vms() {
for ((virt="$node_start"; virt<=node_count; virt++)); do
printf -v virt_node %s-%02d "$compute" "$virt"
# Based on the variables in hypervisor.config, we define the variables
# for ram and cpus. The first $control_count nodes are sized as
# control nodes; the rest as compute nodes.
ram="$node_ram"
vcpus="$node_cpus"
node_type="compute"
if [[ $virt -le $control_count ]] ; then
ram="$control_ram"
vcpus="$control_cpus"
node_type="control"
fi
bus=$stg_bus
# Based on the bridges array, it will generate this amount of MAC
# addresses and then create the network definitions to add to
# virt-install
macaddr=()
network_spec=""
# Based on the type of network we are using we will assign variables
# such that this can be either bridge or network type
if [[ $network_type == "bridge" ]] ; then
net_prefix="bridge"
net_type=(${bridges[@]})
elif [[ $network_type == "network" ]] ; then
net_prefix="network"
net_type=(${networks[@]})
fi
# Now define the network definition; 52:54:00 is the QEMU/KVM OUI
for ((mac=0;mac<${#net_type[@]};mac++)); do
macaddr+=($(printf '52:54:00:%02x:%02x:%02x\n' "$((RANDOM%256))" "$((RANDOM%256))" "$((RANDOM%256))"))
network_spec+=" --network=$net_prefix="${net_type[$mac]}",mac="${macaddr[$mac]}",model=$nic_model"
done
# Based on the disks array, it will create a definition to add these
# disks to the VM
disk_spec=""
for ((disk=0;disk<${#disks[@]};disk++)); do
disk_spec+=" --disk path=$storage_path/$virt_node/$virt_node-d$((${disk} + 1)).img"
disk_spec+=",format=$storage_format,size=${disks[$disk]},bus=$bus,io=native,cache=directsync"
done
# Creates the VM with all the attributes given; --print-xml writes the
# domain definition instead of creating it directly
virt-install -v --noautoconsole \
--print-xml \
--autostart \
--boot network,hd,menu=on \
--video qxl,vram=256 \
--channel spicevmc \
--name "$virt_node" \
--ram "$ram" \
--vcpus "$vcpus" \
--os-variant "ubuntu18.04" \
--console pty,target_type=serial \
--graphics spice,clipboard_copypaste=no,mouse_mode=client,filetransfer_enable=off \
--cpu host-passthrough,cache.mode=passthrough \
--controller "$bus",model=virtio-scsi,index=0 \
$disk_spec \
$network_spec > "$virt_node.xml" &&
# Create the VM based on the XML file defined in the above command
virsh define "$virt_node.xml"
# Start the VM
virsh start "$virt_node" &
# Call the maas_add_node function, this will add the node to MAAS
# (the boot NIC's MAC is the first one generated above)
maas_add_node ${virt_node} ${macaddr[0]} ${node_type} &
# Wait some time before building the next, this helps with a lot of DHCP requests
# and ensures that all VMs are commissioned and deployed.
sleep ${build_fanout}
done
wait
}
# Tear down every VM: stop and undefine the libvirt domain, remove its
# storage and XML definitions, and delete the machine record from MAAS.
# Globals: node_start, node_count, compute, disks[], storage_path,
#          maas_profile (all read)
destroy_vms() {
for ((node="$node_start"; node<=node_count; node++)); do
printf -v virt_node %s-%02d "$compute" "$node"
# If the domain is running, this will complete, else throw a warning
virsh --connect qemu:///system destroy "$virt_node"
# Actually remove the VM
virsh --connect qemu:///system undefine "$virt_node"
# Remove the three storage volumes from disk
for ((disk=0;disk<${#disks[@]};disk++)); do
virsh vol-delete --pool "$virt_node" "$virt_node-d$((${disk} + 1)).img"
done
# Remove the folder storage is located
rm -rf "$storage_path/$virt_node/"
sync
# Remove the XML definitions for the VM
rm -f "$virt_node.xml" \
"/etc/libvirt/qemu/$virt_node.xml" \
"/etc/libvirt/storage/$virt_node.xml" \
"/etc/libvirt/storage/autostart/$virt_node.xml"
# Now remove the VM from MAAS
system_id=$(maas_system_id ${virt_node})
maas ${maas_profile} machine delete ${system_id}
done
}
# Print the supported command-line flags (blank line before and after,
# matching the original echo output byte-for-byte).
show_help() {
    printf '\n%s\n%s\n%s\n\n' \
        '-c Creates everything' \
        '-w Removes everything' \
        '-d Releases VMs, Clears Disk'
}
# Option dispatch. The leading ':' puts getopts in silent mode so unknown
# flags land in the \? branch with the flag name in $OPTARG.
while getopts ":cwd" opt; do
case $opt in
c)
create_vms
;;
w)
wipe_vms
;;
d)
wipe_disks
;;
\?)
printf "Unrecognized option: -%s. Valid options are:" "$OPTARG" >&2
show_help
exit 1
;;
esac
done

View File

@@ -1,605 +0,0 @@
#!/bin/bash
# Builds, manages and destroys the KVM-based nodes that MAAS controls.
# Uncomment for command tracing while debugging:
# set -x
# Pull in shared helpers (maas_login, maas_system_id, install_deps, ...)
# NOTE(review): assumes functions.sh is in the current directory - confirm
. functions.sh
# qemu-img image format for every node disk
# Storage type
storage_format="raw"
# Models for nic and storage (libvirt device models)
nic_model="virtio"
stg_bus="scsi"
# Time between building VMs: seconds to pause between successive VM
# builds so a burst of DHCP requests does not overwhelm MAAS
build_fanout=60
# Backwards-compatible wrapper kept for older callers; delegates to
# maas_auto_assign_networks.
# Arguments: $1 - MAAS system_id of the machine
maas_assign_networks()
{
    # "$@" forwards arguments without re-splitting (was unquoted $1)
    maas_auto_assign_networks "$@"
}
# Attempts to auto assign all the networks for a host.
# Note: this only works straight after commissioning a machine, while
# the interface links are still unconfigured.
# Arguments: $1 - MAAS system_id of the machine
# Globals:   maas_profile   - MAAS CLI profile name
#            external_vlan  - VLAN id that should use DHCP, not AUTO
maas_auto_assign_networks()
{
    local system_id=$1
    local interface int_id subnet_id mode vlan new_mode subnet_line
    # Grab every interface attached to the system, one compact JSON
    # object per line (jq -c output contains no spaces, so the
    # word-split in the for loop below is safe)
    node_interfaces=$(maas ${maas_profile} interfaces read ${system_id} \
        | jq -c ".[] | {id:.id, name:.name, mode:.links[].mode, subnet:.links[].subnet.id, vlan:.vlan.vid }")
    # Enable Auto-Assign (or DHCP) on every port
    for interface in ${node_interfaces}
    do
        int_id=$(echo $interface | jq ".id" | sed s/\"//g)
        subnet_id=$(echo $interface | jq ".subnet" | sed s/\"//g)
        mode=$(echo $interface | jq ".mode" | sed s/\"//g)
        vlan=$(echo $interface | jq ".vlan" | sed s/\"//g)
        # fix: reset per interface so a value from the previous
        # iteration can never leak through (new_mode was sticky)
        new_mode=""
        # Although the vlan would have been set, the discovered vlan
        # wouldn't, and therefore link[] and discovery[] lists won't
        # exist.  So look the subnet up by the vlan that was
        # assigned/discovered at commissioning stage.
        if [[ $subnet_id == null ]] ; then
            # fix: was hard-coded "maas admin"; use ${maas_profile}
            # like every other call in this file
            subnet_line=$(maas ${maas_profile} subnets read | jq -c ".[] | {subnet_id:.id, vlan:.vlan.vid, vlan_id:.vlan.id}" | grep "vlan\":$vlan,")
            subnet_id=$(echo $subnet_line | jq -r .subnet_id)
        fi
        # If vlan is the external network, then we want to grab an IP
        # via DHCP from the external network.  Other unconfigured
        # networks use auto mode.
        if [[ $vlan -eq $external_vlan ]] && [[ $mode != "dhcp" ]]; then
            new_mode="DHCP"
        elif [[ $mode != "auto" ]] && [[ $mode != "dhcp" ]] ; then
            new_mode="AUTO"
        fi
        # Only (re)link the interfaces we decided need configuring.
        # fix: the old '!= "AUTO" || != "DHCP"' test was always true,
        # so every interface was relinked unconditionally
        if [[ -n $new_mode ]]; then
            assign_network=$(maas ${maas_profile} interface link-subnet ${system_id} ${int_id} mode=${new_mode} subnet=${subnet_id})
        fi
    done
}
# Lay out storage on a freshly-commissioned machine: a 512 MiB FAT32
# /boot/efi partition plus an LVM volume group (vg0) carrying /tmp and
# /var/tmp (2 GiB each) and / (all remaining space), each LV ext4.
# Arguments: $1 - MAAS system_id of the machine
# Globals:   maas_profile
# NOTE(review): assumes the first disk is named "sda" - confirm on
# machines with NVMe (nvme0n1) or virtio (vda) devices.
maas_create_partitions()
{
    system_id=$1
    vg_name="vg0"
    # mount points keyed by logical-volume name
    declare -A parts
    parts=(
        ["tmp"]="/tmp"
        ["var-tmp"]="/var/tmp"
        ["root"]="/"
    )
    # sizes in GiB; "remaining" means "whatever is left in the VG"
    declare -A part_size
    part_size=(
        ["tmp"]=2
        ["var-tmp"]=2
        ["root"]="remaining"
    )
    # Wipe everything first
    storage_layout=$(maas ${maas_profile} machine set-storage-layout ${system_id} storage_layout=blank)
    # Grab the first disk, typically /dev/sda
    blk_device=$(maas ${maas_profile} block-devices read ${system_id} | jq ".[] | select(.name == \"sda\")")
    blk_device_id=$(echo $blk_device | jq -r .id)
    # create /boot/efi partition, just in-case we are using a uEFI based VM
    boot_size=512
    size=$(( ${boot_size} * 1024 * 1024 ))
    boot_part=$(maas ${maas_profile} partitions create ${system_id} ${blk_device_id} size=$size)
    boot_part_id=$(echo $boot_part | jq -r .id)
    boot_format=$(maas ${maas_profile} partition format ${system_id} ${blk_device_id} ${boot_part_id} fstype=fat32)
    boot_mount=$(maas ${maas_profile} partition mount ${system_id} ${blk_device_id} ${boot_part_id} mount_point=/boot/efi)
    # Create the volume group for the rest of the partitions
    # (no size argument, so the partition takes the rest of the disk)
    vg_part=$(maas ${maas_profile} partitions create ${system_id} ${blk_device_id})
    vg_part_id=$(echo $vg_part | jq -r .id)
    vg_create=$(maas ${maas_profile} volume-groups create ${system_id} name=${vg_name} partitions=${vg_part_id})
    vg_id=$(echo $vg_create | jq -r .id)
    # Carve each logical volume, format it ext4 and mount it.
    # NOTE(review): associative-array iteration order is unspecified in
    # bash; if "root" ("remaining") is visited before the fixed-size LVs
    # it will consume all available space - confirm this is intended.
    for part in ${!parts[@]}; do
        if [[ ${part_size[$part]} == "remaining" ]] ; then
            size=$(maas ${maas_profile} volume-group read ${system_id} ${vg_id} | jq -r ".available_size")
        else
            size=$(( ${part_size[$part]} * 1024 * 1024 * 1024 ))
        fi
        lv_create=$(maas ${maas_profile} volume-group create-logical-volume ${system_id} ${vg_id} name=${part} size=${size})
        lv_block_id=$(echo ${lv_create} | jq -r .id)
        stg_fs=$(maas ${maas_profile} block-device format ${system_id} ${lv_block_id} fstype=ext4)
        stg_mount=$(maas ${maas_profile} block-device mount ${system_id} ${lv_block_id} mount_point=${parts[$part]})
    done
}
# Full node build: install deps, log in to MAAS, create the disk
# images, then build and register the VMs.
create_vms() {
    local step
    for step in install_deps maas_login create_storage build_vms; do
        "$step"
    done
}
# Same pipeline as create_vms, but restricted to the juju controller
# VM(s): storage creation and VM build both run in "juju" mode.
create_juju() {
    local target="juju"
    install_deps
    maas_login
    create_storage "$target"
    build_vms "$target"
}
# Destroy and clean up all the managed VMs (deps and MAAS login first).
wipe_vms() {
    local step
    for step in install_deps maas_login destroy_vms; do
        "$step"
    done
}
# Apply one maintenance action to every VM (juju controllers first,
# then the control/compute nodes).
# Arguments: $1 - one of: network | zone | commission | partition | tag
# Globals:   node_start, node_count, control_count, juju_count,
#            hypervisor_name, hypervisor_tag, compute, maas_profile,
#            build_fanout
# Machines already in "Deployed" state are skipped for the
# network/commission/partition actions.
do_nodes()
{
    install_deps
    maas_login
    function=$1
    juju_total=1
    for ((virt="$node_start"; virt<=node_count; virt++)); do
        # The first $control_count nodes are control-plane; the rest
        # are compute
        node_type="compute"
        if [[ $virt -le $control_count ]] ; then
            node_type="control"
        fi
        if [[ $juju_total -le $juju_count ]] ; then
            printf -v virt_node %s-%02d "$hypervisor_name-juju" "$juju_total"
            doing_juju="true"
            node_type="juju"
            # Decrement so the juju VM does not consume a node slot;
            # the loop revisits the same $virt on the next pass
            (( virt-- ))
            (( juju_total++ ))
        else
            printf -v virt_node %s-%02d "$compute" "$virt"
        fi
        system_id=$(maas_system_id ${virt_node})
        status_name=$(maas ${maas_profile} machine read ${system_id} | jq -r ".status_name")
        # Never re-network/re-commission/re-partition a deployed machine
        if [[ ${status_name} == "Deployed" ]] ; then
            case $function in
                "network"|"commission"|"partition")
                    echo "Skipping ${virt_node} ..."
                    continue
                    ;;
            esac
        fi
        echo "Setting up $function for $virt_node ..."
        if [[ $function == "network" ]] ; then
            maas_auto_assign_networks ${system_id} &
        elif [[ $function == "zone" ]] ; then
            machine_set_zone ${system_id} ${hypervisor_name} &
        elif [[ $function == "commission" ]] ; then
            commission_node ${system_id} &
            # stagger commissions so we do not flood DHCP/MAAS
            sleep ${build_fanout}
        elif [[ $function == "partition" ]] ; then
            # juju controllers keep the default MAAS storage layout
            [[ $node_type == "juju" ]] && continue
            maas_create_partitions ${system_id} &
        elif [[ $function == "tag" ]] ; then
            machine_add_tag ${system_id} ${node_type}
            machine_add_tag ${system_id} ${hypervisor_name}
            machine_add_tag ${system_id} ${hypervisor_tag}
        fi
    done
    # Wait for all backgrounded per-node actions to finish
    wait
}
# Pre-create the qemu disk images for every node and for the juju
# controller VM(s).  Disk 0 lives under $storage_path; any additional
# (ceph) disks live under $ceph_storage_path.  Existing images are
# left untouched.
# Arguments: $1 - optional; "juju" creates only the juju images
# Globals:   node_start, node_count, juju_count, juju_disk, compute,
#            hypervisor_name, storage_path, ceph_storage_path,
#            storage_format, disks[]
create_storage() {
    # To keep a track of how many juju VMs we have created
    only_juju="false"
    node_count_bak=$node_count
    if [[ $1 == "juju" ]] ; then
        # Zero the node count so the node loop below is skipped and
        # only the juju images are created
        node_count=0
        if [[ $juju_count -lt 1 ]] ; then
            echo "WARNING: requested only create juju, but juju_count = ${juju_count}"
            return 0
        fi
    fi
    for ((virt="$node_start"; virt<=node_count; virt++)); do
        printf -v virt_node %s-%02d "$compute" "$virt"
        # Create the directories where the storage files will be located
        mkdir -p "$storage_path/$virt_node"
        mkdir -p "$ceph_storage_path/$virt_node"
        # For all the disks that are defined in the array, create a disk
        for ((disk=0;disk<${#disks[@]};disk++)); do
            if [[ $disk -eq 0 ]] ; then
                final_storage_path=$storage_path
            else
                final_storage_path=$ceph_storage_path
            fi
            file_name="$final_storage_path/$virt_node/$virt_node-d$((disk + 1)).img"
            if [[ ! -f $file_name ]] ; then
                /usr/bin/qemu-img create -f "$storage_format" "${file_name}" "${disks[$disk]}"G &
            fi
        done
    done
    for ((juju=1; juju<=juju_count; juju++)); do
        printf -v virt_node %s-%02d "$hypervisor_name-juju" "$juju"
        # Create the directory where the storage files will be located
        mkdir -p "$storage_path/$virt_node"
        file_name="$storage_path/$virt_node/$virt_node.img"
        if [[ ! -f $file_name ]] ; then
            # fix: quote ${file_name} (it was unquoted here but quoted
            # in the node loop above; paths may contain spaces)
            /usr/bin/qemu-img create -f "$storage_format" "${file_name}" "${juju_disk}"G &
        fi
    done
    node_count=$node_count_bak
    # Wait for all backgrounded qemu-img jobs to finish
    wait
}
# The purpose of this function is to stop and release the nodes and
# wipe their disks to save space, so the machines in MAAS can be
# re-used.  Handles juju and sunbeam controller VMs as well as nodes.
# Globals: node_start, node_count, juju_count, sunbeam_count,
#          hypervisor_name, compute, storage_path, ceph_storage_path,
#          disks[], maas_profile
wipe_disks() {
    juju_total=1
    doing_juju="false"
    # fix: these were never initialised; an empty string compares as 0
    # in '[[ ... -le ... ]]' so the sunbeam branch was entered
    # spuriously, and "$unbeam_total" below was a typo for sunbeam_total
    sunbeam_total=1
    doing_sunbeam="false"
    for ((virt="$node_start"; virt<=node_count; virt++)); do
        if [[ $juju_total -le $juju_count ]] ; then
            printf -v virt_node %s-%02d "$hypervisor_name-juju" "$juju_total"
            doing_juju="true"
            (( virt-- ))
            (( juju_total++ ))
        elif [[ $sunbeam_total -le $sunbeam_count ]] ; then
            printf -v virt_node %s-%02d "$hypervisor_name-sunbeam" "$sunbeam_total"
            doing_sunbeam="true"
            (( virt-- ))
            (( sunbeam_total++ ))
        else
            printf -v virt_node %s-%02d "$compute" "$virt"
            doing_juju="false"
            # fix: also reset the sunbeam flag, otherwise every node
            # after the last sunbeam VM took the single-image branch
            doing_sunbeam="false"
        fi
        system_id=$(maas_system_id ${virt_node})
        # Release the machine in MAAS
        release_machine=$(maas ${maas_profile} machine release ${system_id})
        # Ensure that the machine is in ready state before the next step
        ensure_machine_in_state ${system_id} "Ready"
        # Stop the machine if it is running
        # It's probably stopped anyway as per the release above
        virsh --connect qemu:///system shutdown "$virt_node"
        # Remove the disks
        if [[ $doing_juju == "true" ]] || [[ $doing_sunbeam == "true" ]] ; then
            rm -rf "$storage_path/$virt_node/$virt_node.img"
            rm -rf "$ceph_storage_path/$virt_node/$virt_node.img"
        else
            for ((disk=0;disk<${#disks[@]};disk++)); do
                rm -rf "$storage_path/$virt_node/$virt_node-d$((disk + 1)).img" &
                rm -rf "$ceph_storage_path/$virt_node/$virt_node-d$((disk + 1)).img" &
            done
        fi
    done
    # Re-create the storage again from scratch
    create_storage
    wait
}
# Check whether a libvirt domain with the given name exists.
# Arguments: $1 - domain (node) name
# Outputs:   the MAC address of the domain's first interface if it
#            exists, otherwise the literal string "false"
machine_exists()
{
    node_name=$1
    # -x: exact whole-line match so "node-1" cannot match "node-10"
    # (was an unquoted substring grep)
    virsh_machine=$(virsh list --all --name | grep -x -- "${node_name}")
    if [[ $virsh_machine != "" ]] ; then
        # fix: 'tail +3' is obsolete syntax that GNU tail rejects;
        # 'tail -n +3' skips the two header lines of domiflist output
        macaddr=$(virsh domiflist "${node_name}" | tail -n +3 | head -n 1 | awk '{print $5}')
        echo "$macaddr"
    else
        echo "false"
    fi
}
# Convenience alias: return the MAC of a domain, or "false" if it does
# not exist.
# Arguments: forwarded verbatim to machine_exists
get_mac()
{
    # "$@" preserves argument boundaries (was unquoted $*)
    machine_exists "$@"
}
# Builds the VMs from scratch with virt-install and registers each one
# in MAAS.  Juju controller VMs are created first, then sunbeam VMs,
# then the control/compute nodes.  A libvirt domain that already exists
# is re-used and only re-added to MAAS.
# Arguments: $1 - optional; "juju" builds only the juju controller VM(s)
# Globals (from the sourced configs): node_start, node_count,
#   control_count, juju_count, sunbeam_count, compute, hypervisor_name,
#   network_type, bridges[]/networks[], bridge_type, storage_path,
#   ceph_storage_path, storage_format, disks[], node_ram/node_cpus,
#   control_ram/control_cpus, juju_ram/juju_cpus/juju_disk,
#   sunbeam_ram/sunbeam_cpus, nic_model, stg_bus, build_fanout,
#   enable_secureboot, enable_uefi
build_vms() {
    # To keep a track of how many juju/sunbeam VMs we have created
    juju_total=1
    only_juju="false"
    # fix: these were never initialised; an empty $sunbeam_total
    # compares as 0 in '[[ ... -le ... ]]', spuriously entering the
    # sunbeam branch (and looping forever via '(( virt-- ))' when
    # sunbeam_count is unset)
    sunbeam_total=1
    only_sunbeam="false"
    if [[ $1 == "juju" ]] ; then
        only_juju="true"
        if [[ $juju_count -lt 1 ]] ; then
            echo "WARNING: requested only create juju, but juju_count = ${juju_count}"
            return 0
        fi
    fi
    for ((virt="$node_start"; virt<=node_count; virt++)); do
        # Based on the bridges array, it will generate these amount of MAC
        # addresses and then create the network definitions to add to
        # virt-install
        network_spec=""
        extra_args=""
        # Based on the type of network we are using we will assign variables
        # such that this can be either bridge or network type
        if [[ $network_type == "bridge" ]] ; then
            net_prefix="bridge"
            net_type=(${bridges[@]})
        elif [[ $network_type == "network" ]] ; then
            net_prefix="network"
            net_type=(${networks[@]})
        fi
        if [[ $juju_total -le $juju_count ]] ; then
            printf -v virt_node %s-%02d "$hypervisor_name-juju" "$juju_total"
            ram="$juju_ram"
            vcpus="$juju_cpus"
            node_type="juju"
            # Now define the network definition
            network_spec="--network=$net_prefix="${net_type[0]}",model=$nic_model"
            if [[ "${bridge_type}" == "ovs" ]] ; then
                network_spec+=",virtualport_type=openvswitch"
            fi
            disk_spec="--disk path=$storage_path/$virt_node/$virt_node.img"
            disk_spec+=",format=$storage_format,size=${juju_disk},bus=$stg_bus,io=native,cache=directsync"
            # So that we have the right amount of VMs
            (( virt-- ))
            (( juju_total++ ))
            # This will ensure that we only create the juju VMs
            [[ $only_juju == "true" ]] && [[ $juju_total -gt $juju_count ]] && virt=$(( $node_count + 1 ))
        elif [[ $sunbeam_total -le $sunbeam_count ]] ; then
            printf -v virt_node %s-%02d "$hypervisor_name-sunbeam" "$sunbeam_total"
            ram="$sunbeam_ram"
            vcpus="$sunbeam_cpus"
            node_type="sunbeam"
            # Now define the network definition
            network_spec="--network=$net_prefix="${net_type[0]}",model=$nic_model"
            if [[ "${bridge_type}" == "ovs" ]] ; then
                network_spec+=",virtualport_type=openvswitch"
            fi
            disk_spec="--disk path=$storage_path/$virt_node/$virt_node.img"
            disk_spec+=",format=$storage_format,size=${juju_disk},bus=$stg_bus,io=native,cache=directsync"
            # So that we have the right amount of VMs
            (( virt-- ))
            (( sunbeam_total++ ))
            # This will ensure that we only create the sunbeam VMs
            [[ $only_sunbeam == "true" ]] && [[ $sunbeam_total -gt $sunbeam_count ]] && virt=$(( $node_count + 1 ))
        else
            printf -v virt_node %s-%02d "$compute" "$virt"
            # Based on the variables in hypervisor.config, we define the variables
            # for ram and cpus. This also allows a number of control nodes that
            # can be defined as part of full set of nodes.
            ram="$node_ram"
            vcpus="$node_cpus"
            node_type="compute"
            disk_count=${#disks[@]}
            if [[ $virt -le $control_count ]] ; then
                ram="$control_ram"
                vcpus="$control_cpus"
                node_type="control"
                disk_count=1
            fi
            # Now define the network definition
            for ((net=0;net<${#net_type[@]};net++)); do
                network_spec+=" --network=$net_prefix="${net_type[$net]}",model=$nic_model"
                if [[ "${bridge_type}" == "ovs" ]] ; then
                    network_spec+=",virtualport_type=openvswitch"
                fi
            done
            # Based on the disks array, it will create a definition to add these
            # disks to the VM
            disk_spec=""
            for ((disk=0;disk<${disk_count};disk++)); do
                if [[ $disk -eq 0 ]] ; then
                    final_storage_path=$storage_path
                else
                    final_storage_path=$ceph_storage_path
                fi
                disk_spec+=" --disk path=$final_storage_path/$virt_node/$virt_node-d$((disk + 1)).img"
                disk_spec+=",format=$storage_format,size=${disks[$disk]},bus=$stg_bus,io=native,cache=directsync"
            done
        fi
        # Check to see if the libvirt machine already exists. If it exists
        # then just use the same one again and commission in MAAS
        check_machine=$(machine_exists ${virt_node})
        if [[ $check_machine != "false" ]] ; then
            macaddr=$check_machine
            maas_add_node ${virt_node} ${macaddr} ${node_type} &
            sleep ${build_fanout}
            continue
        fi
        # For testing and WIP/POC
        if [[ ${enable_secureboot} == "true" ]] ; then
            extra_args+=" --boot loader_secure=yes"
            #extra_args+=",loader=/usr/share/OVMF/OVMF_CODE.secboot.fd"
            #extra_args+=",nvram_template=/usr/share/OVMF/OVMF_VARS.fd"
            #extra_args+=",loader_ro=yes"
            #extra_args+=",loader_type=pflash"
            extra_args+=" --machine q35"
            extra_args+=" --features smm=on"
            enable_uefi="true"
        fi
        # Flags required to enable uEFI
        [[ ${enable_uefi} == "true" ]] && extra_args+=" --boot uefi"
        # Creates the VM with all the attributes given
        virt-install -v --noautoconsole \
            --print-xml \
            --autostart \
            --boot network,hd,menu=on \
            --video qxl,vram=256 \
            --channel spicevmc \
            --name "$virt_node" \
            --memory "memory=$(( $ram * 6 / 4 )),currentMemory=$ram" \
            --vcpus "$vcpus" \
            --console pty,target_type=serial \
            --graphics spice,clipboard_copypaste=no,mouse_mode=client,filetransfer_enable=off \
            --cpu host-passthrough,cache.mode=passthrough \
            --controller "$stg_bus",model=virtio-scsi,index=0 \
            --osinfo detect=on,require=off \
            $extra_args $disk_spec \
            $network_spec > "$virt_node.xml" &&
        # Create the VM based on the XML file defined in the above command
        virsh define "$virt_node.xml"
        macaddr=$(get_mac ${virt_node})
        # Call the maas_add_node function, this will add the node to MAAS
        maas_add_node ${virt_node} ${macaddr} ${node_type} &
        # Wait some time before building the next, this helps with a lot of DHCP requests
        # and ensures that all VMs are commissioned and deployed.
        sleep ${build_fanout}
    done
    wait
}
# Destroy every VM (juju controllers first, then nodes): stop and
# undefine the libvirt domain (including its NVRAM), delete its storage
# volumes and leftover XML, then remove the machine record from MAAS.
# Globals: node_start, node_count, juju_count, hypervisor_name,
#          compute, disks[], storage_path, maas_profile
destroy_vms() {
    juju_total=1
    doing_juju="false"
    for ((virt="$node_start"; virt<=node_count; virt++)); do
        if [[ $juju_total -le $juju_count ]] ; then
            printf -v virt_node %s-%02d "$hypervisor_name-juju" "$juju_total"
            doing_juju="true"
            # Juju VMs do not consume a node slot; revisit this $virt
            (( virt-- ))
            (( juju_total++ ))
        else
            printf -v virt_node %s-%02d "$compute" "$virt"
            doing_juju="false"
        fi
        # If the domain is running, this will complete, else throw a warning
        virsh --connect qemu:///system destroy "$virt_node"
        # Actually remove the VM (--nvram also deletes its UEFI vars file)
        virsh --connect qemu:///system undefine "$virt_node" --nvram
        # Remove the storage volumes from disk (juju VMs have a single
        # image; nodes have one volume per entry in disks[])
        if [[ $doing_juju = "true" ]] ; then
            virsh vol-delete --pool "$virt_node" "$virt_node.img"
        else
            for ((disk=0;disk<${#disks[@]};disk++)); do
                virsh vol-delete --pool "$virt_node" "$virt_node-d$((${disk} + 1)).img"
            done
        fi
        # Remove the folder storage is located
        rm -rf "$storage_path/$virt_node/"
        sync
        # Remove the XML definitions for the VM
        rm -f "$virt_node.xml" \
            "/etc/libvirt/qemu/$virt_node.xml" \
            "/etc/libvirt/storage/$virt_node.xml" \
            "/etc/libvirt/storage/autostart/$virt_node.xml"
        # Now remove the VM from MAAS
        system_id=$(maas_system_id ${virt_node})
        delete_machine=$(maas ${maas_profile} machine delete ${system_id})
    done
}
# Print usage information for the supported command-line flags.
show_help() {
    printf '\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n\n' \
        "-c Creates everything" \
        "-d Releases VMs, Clears Disk" \
        "-j Only create juju VM" \
        "-n Updates all the networks on all VMs" \
        "-p Update the partitioning of the nodes" \
        "-r Recommission all VMs" \
        "-t Re-tag all VMS" \
        "-w Removes everything" \
        "-z Add nodes to availability zones"
}
# Initialise the configs (hypervisor/MAAS settings sourced by helpers)
read_configs
# Command-line dispatch: each flag maps to one management action
while getopts ":cdjnprtwz" opt; do
    case $opt in
        c)
            # Build everything, then tag the freshly created nodes
            create_vms
            do_nodes tag
            ;;
        d)
            # Wipe disks needs deps and a MAAS session first
            install_deps
            maas_login
            wipe_disks
            ;;
        j)
            create_juju
            ;;
        n)
            do_nodes network
            ;;
        p)
            do_nodes partition
            ;;
        r)
            do_nodes commission
            ;;
        t)
            do_nodes tag
            ;;
        w)
            wipe_vms
            ;;
        z)
            do_nodes zone
            ;;
        \?)
            printf "Unrecognized option: -%s. Valid options are:" "$OPTARG" >&2
            show_help
            exit 1
            ;;
    esac
done
# Cleanup any packages installed solely for this run
pkg_cleanup

View File

@@ -1,9 +0,0 @@
#!/bin/bash
# Reserve IP ranges in MAAS so DHCP never hands out addresses kept for
# servers (.241-.254 of every managed subnet) or for OpenStack VIPs
# (.211-.225 of the main range).
# Expects maas_profile, maas_subnets[] and maas_ip_range in the config.
. configs/maas.config
# fix: "${arr[@]}" keeps each subnet prefix a separate word even if it
# contains glob characters (was unquoted ${maas_subnets[*]})
for subnet in "${maas_subnets[@]}" ; do
    maas "${maas_profile}" ipranges create type=reserved comment="Servers" start_ip="${subnet}.241" end_ip="${subnet}.254"
done
maas "${maas_profile}" ipranges create type=reserved comment="OpenStack VIPs" start_ip="${maas_ip_range}.211" end_ip="${maas_ip_range}.225"

21
rpi4-maas.config Normal file
View File

@@ -0,0 +1,21 @@
# MAAS autobuilder configuration for the rpi4 environment.
# Sourced as plain shell variable assignments by the bootstrap scripts.
juju_version="latest/stable"
launchpad_user="arif-ali"
# Address the MAAS services listen on (bridge interface)
maas_bridge_ip="192.168.1.22"
# Local image mirror used as the boot-source for ephemeral images
maas_boot_source="http://"$maas_bridge_ip":8765/maas/images/ephemeral-v3/daily/"
maas_endpoint="http://$maas_bridge_ip:5240/MAAS"
# First three octets of the range MAAS manages
maas_ip_range="10.0.1"
maas_local_proxy="http://$maas_bridge_ip:8000"
maas_pass="openstack"
maas_profile="admin"
maas_system_ip="192.168.1.10"
maas_upstream_dns="1.1.1.1 4.4.4.4 8.8.8.8"
maas_user="maas"
maas_version="2.4"
# NOTE(review): credentials and this API key are committed in plain
# text - consider rotating the key and sourcing secrets from an
# untracked file instead
maas_api_key="z9cT7jE3BhmxcXkWWN:ew5WW9QdDMg8TXVnjt:NtKgJdfgA5FVw2YT9CnaKU87wJ5fTxKa"
# How MAAS itself is installed: "snap" or "deb"
maas_pkg_type="snap"
no_proxy="localhost,127.0.0.1,$maas_system_ip"
squid_proxy="http://192.168.100.10:3128"
virsh_user="ubuntu"
package_repository="http://$maas_bridge_ip:8765/mirror/ubuntu"
snap_store_proxy="$squid_proxy"
# Empty means no store assertions are pushed to the snap proxy
snap_store_assertions=""

View File

@@ -1,14 +1,7 @@
#cloud-config
ssh_import_id:
- lp:arif-ali
byobu_by_default: enable
timezone: "Europe/London"
# Allow ssh passwords
ssh_pwauth: True
- lp:arif-ali
## Update apt database and upgrade packages on first boot
package_update: true
@@ -17,104 +10,38 @@ package_upgrade: true
users:
- default
- name: virsh
lock_passwd: false
primary_group: virsh
groups: [ libvirt, libvirt-qemu ]
passwd: $6$SVOxUrhz9mNyscUJ$hKF0RMY1nkGC3BpiozpaznE3AWerd8Ac8AlV9YEpLx50bLw5zweFCuTEEdS04McJNlaIqA.E4HiPuaIYGMzlH/
shell: "/bin/rbash"
ssh_import_id:
- lp:arif-ali
## Install additional packages on first boot
packages:
- virtinst
- bc
- jq
- ksmtuned
- openvswitch-switch
snap:
commands:
- ['install', 'maas']
- qemu-kvm
- libvirt-bin
- qemu-efi
## Write arbitrary files to the file-system (including binaries!)
write_files:
- path: /root/initial_setup.sh
content: |
#!/bin/bash
- path: /root/initial_setup.sh
content: |
#!/bin/bash
git clone https://github.com/arif-ali/maas-autobuilder.git /root/maas-autobuilder
cd /root/maas-autobuilder/configs
ln -sf ${HOSTNAME}.config hypervisor.config
sed "s/^\(PasswordAuthentication\).*/\1 yes/g" /etc/ssh/sshd_config
systemctl restart sshd
cd ../
chmod +x manage-vm-nodes.sh
./manage-vm-nodes.sh -c
permissions: '0755'
owner: root:root
- path: /root/install_kvm.sh
content: |
#!/bin/bash
git clone https://github.com/arif-ali/maas-autobuilder.git /root/maas-autobuilder
cd /root/maas-autobuilder
ln -sf ${HOSTNAME}.config hypervisor.config
series=$(lsb_release -c -s)
if [[ $series == "bionic" ]] ; then
pkgs="qemu-kvm libvirt-bin qemu-efi"
else
pkgs="libvirt-daemon-system libvirt-clients"
fi
apt -y update
apt -y install $pkgs
mkdir -p /home/virsh/bin
ln -s /usr/bin/virsh /home/virsh/bin/virsh
sh -c echo "PATH=/home/virsh/bin" >> /home/virsh/.bashrc
systemctl restart sshd
usermod -a -G libvirt,libvirt-qemu virsh
permissions: '0755'
owner: root:root
- path: /etc/ssh/sshd_config.d/90-virsh-user.conf
content: |
Match user virsh
X11Forwarding no
AllowTcpForwarding no
PermitTTY no
ForceCommand nc -q 0 -U /var/run/libvirt/libvirt-sock
owner: root:root
- path: /root/wipe_disk.sh
content: |
#!/bin/bash
dd if=/dev/zero of=/dev/sda bs=1M count=1000
permissions: '0755'
owner: root:root
- path: /etc/netplan/99-custom.yaml
content: |
network:
version: 2
ethernets:
enp1s0:
wakeonlan: true
link-local: []
enp2s0:
wakeonlan: true
link-local: []
enp3s0:
wakeonlan: true
link-local: []
permissions: '0644'
owner: root:root
- path: /etc/sysctl.d/99-custom.conf
content: |
kernel.softlockup_all_cpu_backtrace=1
permissions: '0644'
owner: root:root
chmod +x manage-maas-nodes.sh
./manage-maas-nodes.sh -c
permissions: '0755'
owner: root:root
# Runs any command that we need to run post install
runcmd:
- [ "/root/install_kvm.sh" ]
- [ "rm", "/root/install_kvm.sh" ]
- [ "/root/initial_setup.sh" ]
- [ "rm", "/root/initial_setup.sh" ]
- [ "netplan", "apply" ]
- [ "sysctl", "-p", "--system"]
- [ "/root/initial_setup.sh" ]
- [ "efibootmgr", "-n", "0001"]