2
0
mirror of https://github.com/xcat2/confluent.git synced 2026-03-10 01:59:17 +00:00

Merge branch 'master' into webauthn

This commit is contained in:
Tinashe Kucherera
2024-09-12 11:40:20 -04:00
committed by GitHub
71 changed files with 2496 additions and 311 deletions

30
README.md Normal file
View File

@@ -0,0 +1,30 @@
# Confluent
![Python 3](https://img.shields.io/badge/python-3-blue.svg) [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/xcat2/confluent/blob/master/LICENSE)
Confluent is a software package to handle essential bootstrap and operation of scale-out server configurations.
It supports stateful and stateless deployments for various operating systems.
Check [this page](https://hpc.lenovo.com/users/documentation/whatisconfluent.html) for a more detailed list of features.
Confluent is the modern successor of [xCAT](https://github.com/xcat2/xcat-core).
If you're coming from xCAT, check out [this comparison](https://hpc.lenovo.com/users/documentation/confluentvxcat.html).
# Documentation
Confluent documentation is hosted on hpc.lenovo.com: https://hpc.lenovo.com/users/documentation/
# Download
Get the latest version from: https://hpc.lenovo.com/users/downloads/
Check release notes on: https://hpc.lenovo.com/users/news/
# Open Source License
Confluent is made available under the Apache 2.0 license: https://opensource.org/license/apache-2-0
# Developers
Want to help? Submit a [Pull Request](https://github.com/xcat2/confluent/pulls).

View File

@@ -157,7 +157,7 @@ def main():
elif attrib.endswith('.ipv6_address') and val:
ip6bynode[node][currnet] = val.split('/', 1)[0]
elif attrib.endswith('.hostname'):
namesbynode[node][currnet] = re.split('\s+|,', val)
namesbynode[node][currnet] = re.split(r'\s+|,', val)
for node in ip4bynode:
mydomain = domainsbynode.get(node, None)
for ipdb in (ip4bynode, ip6bynode):

View File

@@ -88,6 +88,7 @@ for rsp in session.read('/noderange/{0}/configuration/management_controller/user
for node in databynode:
if 'error' in rsp['databynode'][node]:
print(node, ':', rsp['databynode'][node]['error'])
errorNodes.add(node)
continue
for user in rsp['databynode'][node]['users']:
if user['username'] == username:
@@ -97,6 +98,10 @@ for rsp in session.read('/noderange/{0}/configuration/management_controller/user
uid_dict[user['uid']] = uid_dict[user['uid']] + ',{}'.format(node)
break
if not uid_dict:
print("Error: Could not reach target node's bmc user")
sys.exit(1)
for uid in uid_dict:
success = session.simple_noderange_command(uid_dict[uid], 'configuration/management_controller/users/{0}'.format(uid), new_password, key='password', errnodes=errorNodes) # = 0 if successful

View File

@@ -303,9 +303,14 @@ else:
'/noderange/{0}/configuration/management_controller/extended/all'.format(noderange),
session, printbmc, options, attrprefix='bmc.')
if options.extra:
rcode |= client.print_attrib_path(
'/noderange/{0}/configuration/management_controller/extended/extra'.format(noderange),
session, printextbmc, options)
if options.advanced:
rcode |= client.print_attrib_path(
'/noderange/{0}/configuration/management_controller/extended/extra_advanced'.format(noderange),
session, printextbmc, options)
else:
rcode |= client.print_attrib_path(
'/noderange/{0}/configuration/management_controller/extended/extra'.format(noderange),
session, printextbmc, options)
if printsys or options.exclude:
if printsys == 'all':
printsys = []

View File

@@ -243,7 +243,7 @@ if options.windowed:
elif 'Height' in line:
window_height = int(line.split(':')[1])
elif '-geometry' in line:
l = re.split(' |x|-|\+', line)
l = re.split(' |x|-|\\+', line)
l_nosp = [ele for ele in l if ele.strip()]
wmxo = int(l_nosp[1])
wmyo = int(l_nosp[2])

View File

@@ -81,6 +81,12 @@ def main(args):
if not args.profile and args.network:
sys.stderr.write('Both noderange and a profile name are required arguments to request a network deployment\n')
return 1
if args.clear and args.profile:
sys.stderr.write(
'The -c/--clear option should not be used with a profile, '
'it is a request to not deploy any profile, and will clear '
'whatever the current profile is without being specified\n')
return 1
if extra:
sys.stderr.write('Unrecognized arguments: ' + repr(extra) + '\n')
c = client.Command()
@@ -166,8 +172,6 @@ def main(args):
','.join(errnodes)))
return 1
rc |= c.simple_noderange_command(args.noderange, '/power/state', 'boot')
if args.network and not args.prepareonly:
return rc
return 0
if __name__ == '__main__':

View File

@@ -1,12 +1,16 @@
%define name confluent_client
%define version #VERSION#
%define fversion %{lua:
sv, _ = string.gsub("#VERSION#", "[~+]", "-")
print(sv)
}
%define release 1
Summary: Client libraries and utilities for confluent
Name: %{name}
Version: %{version}
Release: %{release}
Source0: %{name}-%{version}.tar.gz
Source0: %{name}-%{fversion}.tar.gz
License: Apache2
Group: Development/Libraries
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
@@ -21,7 +25,7 @@ This package enables python development and command line access to
a confluent server.
%prep
%setup -n %{name}-%{version} -n %{name}-%{version}
%setup -n %{name}-%{fversion}
%build
%if "%{dist}" == ".el7"

View File

@@ -1,7 +1,11 @@
VERSION=`git describe|cut -d- -f 1`
NUMCOMMITS=`git describe|cut -d- -f 2`
if [ "$NUMCOMMITS" != "$VERSION" ]; then
VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3`
LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
LASTNUM=$((LASTNUM+1))
FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
VERSION=${FIRSTPART}.${LASTNUM}
VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
fi
sed -e "s/#VERSION#/$VERSION/" confluent_osdeploy.spec.tmpl > confluent_osdeploy.spec
cd ..

View File

@@ -2,7 +2,11 @@ cd $(dirname $0)
VERSION=`git describe|cut -d- -f 1`
NUMCOMMITS=`git describe|cut -d- -f 2`
if [ "$NUMCOMMITS" != "$VERSION" ]; then
VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3`
LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
LASTNUM=$((LASTNUM+1))
FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
VERSION=${FIRSTPART}.${LASTNUM}
VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
fi
sed -e "s/#VERSION#/$VERSION/" confluent_osdeploy-aarch64.spec.tmpl > confluent_osdeploy-aarch64.spec
cd ..

View File

@@ -86,7 +86,7 @@ def map_idx_to_name():
for line in subprocess.check_output(['ip', 'l']).decode('utf8').splitlines():
if line.startswith(' ') and 'link/' in line:
typ = line.split()[0].split('/')[1]
devtype[prevdev] = typ if type != 'ether' else 'ethernet'
devtype[prevdev] = typ if typ != 'ether' else 'ethernet'
if line.startswith(' '):
continue
idx, iface, rst = line.split(':', 2)
@@ -192,8 +192,10 @@ class NetplanManager(object):
if needcfgwrite:
needcfgapply = True
newcfg = {'network': {'version': 2, 'ethernets': {devname: self.cfgbydev[devname]}}}
oumask = os.umask(0o77)
with open('/etc/netplan/{0}-confluentcfg.yaml'.format(devname), 'w') as planout:
planout.write(yaml.dump(newcfg))
os.umask(oumask)
if needcfgapply:
subprocess.call(['netplan', 'apply'])
@@ -403,19 +405,36 @@ class NetworkManager(object):
else:
cname = stgs.get('connection_name', None)
iname = list(cfg['interfaces'])[0]
if not cname:
cname = iname
ctype = self.devtypes.get(iname, None)
if not ctype:
sys.stderr.write("Warning, no device found for interface_name ({0}), skipping setup\n".format(iname))
return
if stgs.get('vlan_id', None):
vlan = stgs['vlan_id']
if ctype == 'infiniband':
vlan = '0x{0}'.format(vlan) if not vlan.startswith('0x') else vlan
cmdargs['infiniband.parent'] = iname
cmdargs['infiniband.p-key'] = vlan
iname = '{0}.{1}'.format(iname, vlan[2:])
elif ctype == 'ethernet':
ctype = 'vlan'
cmdargs['vlan.parent'] = iname
cmdargs['vlan.id'] = vlan
iname = '{0}.{1}'.format(iname, vlan)
else:
sys.stderr.write("Warning, unknown interface_name ({0}) device type ({1}) for VLAN/PKEY, skipping setup\n".format(iname, ctype))
return
cname = iname if not cname else cname
u = self.uuidbyname.get(cname, None)
cargs = []
for arg in cmdargs:
cargs.append(arg)
cargs.append(cmdargs[arg])
if u:
cmdargs['connection.interface-name'] = iname
subprocess.check_call(['nmcli', 'c', 'm', u] + cargs)
subprocess.check_call(['nmcli', 'c', 'm', u, 'connection.interface-name', iname] + cargs)
subprocess.check_call(['nmcli', 'c', 'u', u])
else:
subprocess.check_call(['nmcli', 'c', 'add', 'type', self.devtypes[iname], 'con-name', cname, 'connection.interface-name', iname] + cargs)
subprocess.check_call(['nmcli', 'c', 'add', 'type', ctype, 'con-name', cname, 'connection.interface-name', iname] + cargs)
self.read_connections()
u = self.uuidbyname.get(cname, None)
if u:
@@ -436,6 +455,12 @@ if __name__ == '__main__':
srvs, _ = apiclient.scan_confluents()
doneidxs = set([])
dc = None
if not srvs: # the multicast scan failed, fallback to deploycfg cfg file
with open('/etc/confluent/confluent.deploycfg', 'r') as dci:
for cfgline in dci.read().split('\n'):
if cfgline.startswith('deploy_server:'):
srvs = [cfgline.split()[1]]
break
for srv in srvs:
try:
s = socket.create_connection((srv, 443))
@@ -498,6 +523,8 @@ if __name__ == '__main__':
netname_to_interfaces['default']['interfaces'] -= netname_to_interfaces[netn]['interfaces']
if not netname_to_interfaces['default']['interfaces']:
del netname_to_interfaces['default']
# Make sure VLAN/PKEY connections are created last
netname_to_interfaces = dict(sorted(netname_to_interfaces.items(), key=lambda item: 'vlan_id' in item[1]['settings']))
rm_tmp_llas(tmpllas)
if os.path.exists('/usr/sbin/netplan'):
nm = NetplanManager(dc)

View File

@@ -3,6 +3,9 @@
[ -f /opt/confluent/bin/apiclient ] && confapiclient=/opt/confluent/bin/apiclient
[ -f /etc/confluent/apiclient ] && confapiclient=/etc/confluent/apiclient
for pubkey in /etc/ssh/ssh_host*key.pub; do
if [ "$pubkey" = /etc/ssh/ssh_host_key.pub ]; then
continue
fi
certfile=${pubkey/.pub/-cert.pub}
rm $certfile
confluentpython $confapiclient /confluent-api/self/sshcert $pubkey -o $certfile

View File

@@ -27,7 +27,7 @@ with open('/etc/confluent/confluent.deploycfg') as dplcfgfile:
_, profile = line.split(' ', 1)
if line.startswith('ipv4_method: '):
_, v4cfg = line.split(' ', 1)
if v4cfg == 'static' or v4cfg =='dhcp':
if v4cfg == 'static' or v4cfg =='dhcp' or not server6:
server = server4
if not server:
server = '[{}]'.format(server6)

View File

@@ -90,8 +90,14 @@ touch /tmp/cryptpkglist
touch /tmp/pkglist
touch /tmp/addonpackages
if [ "$cryptboot" == "tpm2" ]; then
LUKSPARTY="--encrypted --passphrase=$(cat /etc/confluent/confluent.apikey)"
echo $cryptboot >> /tmp/cryptboot
lukspass=$(python3 /opt/confluent/bin/apiclient /confluent-api/self/profileprivate/pending/luks.key 2> /dev/null)
if [ -z "$lukspass" ]; then
lukspass=$(python3 -c 'import os;import base64;print(base64.b64encode(os.urandom(66)).decode())')
fi
echo $lukspass > /etc/confluent/luks.key
chmod 000 /etc/confluent/luks.key
LUKSPARTY="--encrypted --passphrase=$lukspass"
echo $cryptboot >> /tmp/cryptboot
echo clevis-dracut >> /tmp/cryptpkglist
fi
@@ -114,8 +120,8 @@ confluentpython /etc/confluent/apiclient /confluent-public/os/$confluent_profile
grep '^%include /tmp/partitioning' /tmp/kickstart.* > /dev/null || rm /tmp/installdisk
if [ -e /tmp/installdisk -a ! -e /tmp/partitioning ]; then
INSTALLDISK=$(cat /tmp/installdisk)
sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e s/%%LUKSHOOK%%/$LUKSPARTY/ /tmp/partitioning.template > /tmp/partitioning
dd if=/dev/zero of=/dev/$(cat /tmp/installdisk) bs=1M count=1 >& /dev/null
sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e "s!%%LUKSHOOK%%!$LUKSPARTY!" /tmp/partitioning.template > /tmp/partitioning
vgchange -a n >& /dev/null
wipefs -a -f /dev/$INSTALLDISK >& /dev/null
fi
kill $logshowpid

View File

@@ -1,4 +1,5 @@
#!/bin/sh
cryptdisk=$(blkid -t TYPE="crypto_LUKS"|sed -e s/:.*//)
clevis luks bind -f -d $cryptdisk -k - tpm2 '{}' < /etc/confluent/confluent.apikey
cryptsetup luksRemoveKey $cryptdisk < /etc/confluent/confluent.apikey
clevis luks bind -f -d $cryptdisk -k - tpm2 '{}' < /etc/confluent/luks.key
chmod 000 /etc/confluent/luks.key
#cryptsetup luksRemoveKey $cryptdisk < /etc/confluent/confluent.apikey

View File

@@ -171,6 +171,13 @@ permissions=
wait-device-timeout=60000
EOC
if [ "$linktype" = infiniband ]; then
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
[infiniband]
transport-mode=datagram
EOC
fi
autoconfigmethod=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg |awk '{print $2}')
auto6configmethod=$(grep ^ipv6_method: /etc/confluent/confluent.deploycfg |awk '{print $2}')
if [ "$autoconfigmethod" = "dhcp" ]; then

View File

@@ -58,6 +58,10 @@ if ! grep console= /proc/cmdline > /dev/null; then
echo "Automatic console configured for $autocons"
fi
echo sshd:x:30:30:SSH User:/var/empty/sshd:/sbin/nologin >> /etc/passwd
modprobe ib_ipoib
modprobe ib_umad
modprobe hfi1
modprobe mlx5_ib
cd /sys/class/net
for nic in *; do
ip link set $nic up

View File

@@ -8,7 +8,7 @@ for addr in $(grep ^MANAGER: /etc/confluent/confluent.info|awk '{print $2}'|sed
fi
done
mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay
if grep confluennt_imagemethtod=untethered /proc/cmdline > /dev/null; then
if grep confluent_imagemethod=untethered /proc/cmdline > /dev/null; then
mount -t tmpfs untethered /mnt/remoteimg
curl https://$confluent_mgr/confluent-public/os/$confluent_profile/rootimg.sfs -o /mnt/remoteimg/rootimg.sfs
else

View File

@@ -0,0 +1,11 @@
# Systemd unit that runs the confluent onboot hook (onboot.sh) on every boot.
[Unit]
Description=Confluent onboot hook
# onboot.sh contacts the confluent deployment server, so require the
# network to be fully online before starting.
Requires=network-online.target
After=network-online.target
[Service]
ExecStart=/opt/confluent/bin/onboot.sh
[Install]
WantedBy=multi-user.target
View File

@@ -0,0 +1,40 @@
#!/bin/bash
# This script is executed on each boot as it is
# completed. It is best to edit the middle of the file as
# noted below so custom commands are executed before
# the script notifies confluent that install is fully complete.
# Node identity and API key as provisioned by confluent deployment.
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
# Select the deployment server address: when IPv4 was not configured
# ("null" or unset), prefer the IPv6-specific entry; otherwise fall back
# to the generic deploy_server entry below.
v4meth=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg|awk '{print $2}')
if [ "$v4meth" = "null" -o -z "$v4meth" ]; then
confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg|awk '{print $2}')
fi
if [ -z "$confluent_mgr" ]; then
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
fi
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
# Apply timezone and hostname from the deployment configuration.
timedatectl set-timezone $(grep ^timezone: /etc/confluent/confluent.deploycfg|awk '{print $2}')
hostnamectl set-hostname $nodename
export nodename confluent_mgr confluent_profile
# Helper functions (run_remote_python, run_remote_parts, run_remote_config)
# are provided by the confluent functions library.
. /etc/confluent/functions
# Redirect all further output to the onboot log, while mirroring it to
# the console via the backgrounded tail (killed at the end).
mkdir -p /var/log/confluent
chmod 700 /var/log/confluent
exec >> /var/log/confluent/confluent-onboot.log
exec 2>> /var/log/confluent/confluent-onboot.log
chmod 600 /var/log/confluent/confluent-onboot.log
tail -f /var/log/confluent/confluent-onboot.log > /dev/console &
logshowpid=$!
run_remote_python syncfileclient
run_remote_python confignet
# onboot scripts may be placed into onboot.d, e.g. onboot.d/01-firstaction.sh, onboot.d/02-secondaction.sh
run_remote_parts onboot.d
# Induce execution of remote configuration, e.g. ansible plays in ansible/onboot.d/
run_remote_config onboot.d
#curl -X POST -d 'status: booted' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus
kill $logshowpid

View File

@@ -26,12 +26,14 @@ if [ -e /tmp/cnflnthmackeytmp ]; then
chroot . curl -f -H "CONFLUENT_NODENAME: $NODENAME" -H "CONFLUENT_CRYPTHMAC: $(cat /root/$hmacfile)" -d @/tmp/cnflntcryptfile https://$MGR/confluent-api/self/registerapikey
cp /root/$passfile /root/custom-installation/confluent/confluent.apikey
DEVICE=$(cat /tmp/autodetectnic)
IP=done
else
chroot . custom-installation/confluent/bin/clortho $NODENAME $MGR > /root/custom-installation/confluent/confluent.apikey
MGR=[$MGR]
nic=$(grep ^MANAGER /custom-installation/confluent/confluent.info|grep fe80::|sed -e s/.*%//|head -n 1)
nic=$(ip link |grep ^$nic:|awk '{print $2}')
DEVICE=${nic%:}
IP=done
fi
if [ -z "$MGTIFACE" ]; then
chroot . usr/bin/curl -f -H "CONFLUENT_NODENAME: $NODENAME" -H "CONFLUENT_APIKEY: $(cat /root//custom-installation/confluent/confluent.apikey)" https://${MGR}/confluent-api/self/deploycfg > $deploycfg

View File

@@ -79,8 +79,12 @@ if [ ! -z "$cons" ]; then
fi
echo "Preparing to deploy $osprofile from $MGR"
echo $osprofile > /custom-installation/confluent/osprofile
echo URL=http://${MGR}/confluent-public/os/$osprofile/distribution/install.iso >> /conf/param.conf
fcmdline="$(cat /custom-installation/confluent/cmdline.orig) url=http://${MGR}/confluent-public/os/$osprofile/distribution/install.iso"
. /etc/os-release
DIRECTISO=$(blkid -t TYPE=iso9660 |grep -Ei ' LABEL="Ubuntu-Server '$VERSION_ID)
if [ -z "$DIRECTISO" ]; then
echo URL=http://${MGR}/confluent-public/os/$osprofile/distribution/install.iso >> /conf/param.conf
fcmdline="$(cat /custom-installation/confluent/cmdline.orig) url=http://${MGR}/confluent-public/os/$osprofile/distribution/install.iso"
fi
if [ ! -z "$cons" ]; then
fcmdline="$fcmdline console=${cons#/dev/}"
fi

View File

@@ -0,0 +1,12 @@
"""Inject the LUKS passphrase into the Ubuntu autoinstall document.

Loads /autoinstall.yaml, sets the storage layout password to the value of
the ``lukspass`` environment variable, and rewrites the file in place.
"""
import os

import yaml

document = {}
with open('/autoinstall.yaml', 'r') as instream:
    document = yaml.safe_load(instream)
document['storage']['layout']['password'] = os.environ['lukspass']
with open('/autoinstall.yaml', 'w') as outstream:
    yaml.safe_dump(document, outstream)

View File

@@ -3,11 +3,11 @@ echo "Confluent first boot is running"
HOME=$(getent passwd $(whoami)|cut -d: -f 6)
export HOME
(
exec >> /target/var/log/confluent/confluent-firstboot.log
exec 2>> /target/var/log/confluent/confluent-firstboot.log
chmod 600 /target/var/log/confluent/confluent-firstboot.log
exec >> /var/log/confluent/confluent-firstboot.log
exec 2>> /var/log/confluent/confluent-firstboot.log
chmod 600 /var/log/confluent/confluent-firstboot.log
cp -a /etc/confluent/ssh/* /etc/ssh/
systemctl restart sshd
systemctl restart ssh
rootpw=$(grep ^rootpassword: /etc/confluent/confluent.deploycfg |awk '{print $2}')
if [ ! -z "$rootpw" -a "$rootpw" != "null" ]; then
echo root:$rootpw | chpasswd -e
@@ -27,4 +27,4 @@ run_remote_parts firstboot.d
run_remote_config firstboot.d
curl --capath /etc/confluent/tls -f -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" -X POST -d "status: complete" https://$confluent_mgr/confluent-api/self/updatestatus
) &
tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console
tail --pid $! -n 0 -F /var/log/confluent/confluent-post.log > /dev/console

View File

@@ -0,0 +1,26 @@
#!/usr/bin/python3
"""Merge timezone and NTP settings into the Ubuntu autoinstall document.

Reads timezone and ntpservers from /etc/confluent/confluent.deploycfg and
adds them to /autoinstall.yaml, but only when the autoinstall document does
not already provide its own values, then rewrites the file in place.
"""
import yaml
import os

ainst = {}
with open('/autoinstall.yaml', 'r') as allin:
    # An empty document loads as None; normalize to a dict so the merge
    # below cannot fail on attribute access.
    ainst = yaml.safe_load(allin) or {}
tz = None
ntps = []
with open('/etc/confluent/confluent.deploycfg', 'r') as confluentdeploycfg:
    dcfg = yaml.safe_load(confluentdeploycfg) or {}
    # Use .get for both keys: a deployment config may omit either one,
    # and a missing key must not abort the install-time merge.
    tz = dcfg.get('timezone', None)
    ntps = dcfg.get('ntpservers', [])
if ntps and not ainst.get('ntp', None):
    ainst['ntp'] = {}
    ainst['ntp']['enabled'] = True
    ainst['ntp']['servers'] = ntps
if tz and not ainst.get('timezone'):
    ainst['timezone'] = tz
with open('/autoinstall.yaml', 'w') as allout:
    yaml.safe_dump(ainst, allout)

View File

@@ -60,10 +60,12 @@ cp /custom-installation/confluent/bin/apiclient /target/opt/confluent/bin
mount -o bind /dev /target/dev
mount -o bind /proc /target/proc
mount -o bind /sys /target/sys
mount -o bind /run /target/run
mount -o bind /sys/firmware/efi/efivars /target/sys/firmware/efi/efivars
if [ 1 = $updategrub ]; then
chroot /target update-grub
fi
echo "Port 22" >> /etc/ssh/sshd_config
echo "Port 2222" >> /etc/ssh/sshd_config
echo "Match LocalPort 22" >> /etc/ssh/sshd_config
@@ -88,8 +90,36 @@ chroot /target bash -c "source /etc/confluent/functions; run_remote_parts post.d
source /target/etc/confluent/functions
run_remote_config post
if [ -f /etc/confluent_lukspass ]; then
numdevs=$(lsblk -lo name,uuid|grep $(awk '{print $2}' < /target/etc/crypttab |sed -e s/UUID=//)|wc -l)
if [ 0$numdevs -ne 1 ]; then
wall "Unable to identify the LUKS device, halting install"
while :; do sleep 86400; done
fi
CRYPTTAB_SOURCE=$(awk '{print $2}' /target/etc/crypttab)
. /target/usr/lib/cryptsetup/functions
crypttab_resolve_source
if [ ! -e $CRYPTTAB_SOURCE ]; then
wall "Unable to find $CRYPTTAB_SOURCE, halting install"
while :; do sleep 86400; done
fi
cp /etc/confluent_lukspass /target/etc/confluent/luks.key
chmod 000 /target/etc/confluent/luks.key
lukspass=$(cat /etc/confluent_lukspass)
chroot /target apt install libtss2-rc0
PASSWORD=$lukspass chroot /target systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs="" $CRYPTTAB_SOURCE
fetch_remote systemdecrypt
mv systemdecrypt /target/etc/initramfs-tools/scripts/local-top/systemdecrypt
fetch_remote systemdecrypt-hook
mv systemdecrypt-hook /target/etc/initramfs-tools/hooks/systemdecrypt
chmod 755 /target/etc/initramfs-tools/scripts/local-top/systemdecrypt /target/etc/initramfs-tools/hooks/systemdecrypt
chroot /target update-initramfs -u
fi
python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: staged'
umount /target/sys /target/dev /target/proc
umount /target/sys /target/dev /target/proc /target/run
) &
tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console

View File

@@ -13,11 +13,6 @@ exec 2>> /var/log/confluent/confluent-pre.log
chmod 600 /var/log/confluent/confluent-pre.log
cryptboot=$(grep encryptboot: $deploycfg|sed -e 's/^encryptboot: //')
if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then
echo "****Encrypted boot requested, but not implemented for this OS, halting install" > /dev/console
[ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but not implemented for this OS,halting install" >> $(cat /tmp/autoconsdev))
while :; do sleep 86400; done
fi
cat /custom-installation/ssh/*pubkey > /root/.ssh/authorized_keys
@@ -45,6 +40,24 @@ if [ ! -e /tmp/installdisk ]; then
python3 /custom-installation/getinstalldisk
fi
sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml
run_remote_python mergetime
if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then
lukspass=$(python3 /opt/confluent/bin/apiclient /confluent-api/self/profileprivate/pending/luks.key 2> /dev/null)
if [ -z "$lukspass" ]; then
lukspass=$(head -c 66 < /dev/urandom |base64 -w0)
fi
export lukspass
run_remote_python addcrypt
if ! grep 'password:' /autoinstall.yaml > /dev/null; then
echo "****Encrypted boot requested, but the user-data does not have a hook to enable,halting install" > /dev/console
[ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but the user-data does not have a hook to enable,halting install" >> $(cat /tmp/autoconsdev))
while :; do sleep 86400; done
fi
sed -i s!%%CRYPTPASS%%!$lukspass! /autoinstall.yaml
sed -i s!'#CRYPTBOOT'!! /autoinstall.yaml
echo -n $lukspass > /etc/confluent_lukspass
chmod 000 /etc/confluent_lukspass
fi
) &
tail --pid $! -n 0 -F /var/log/confluent/confluent-pre.log > /dev/console

View File

@@ -0,0 +1,17 @@
#!/bin/sh
# initramfs-tools local-top boot script: unlock the root LUKS volume at
# early boot using systemd-cryptsetup with the TPM2, reading the crypttab
# entry that the matching initramfs hook staged into /systemdecrypt.
case $1 in
prereqs)
# initramfs-tools prereqs protocol: no prerequisites for this script.
echo
exit 0
;;
esac
systemdecryptnow() {
. /usr/lib/cryptsetup/functions
# First crypttab field is the mapping name, second is the source device.
local CRYPTTAB_SOURCE=$(awk '{print $2}' /systemdecrypt/crypttab)
local CRYPTTAB_NAME=$(awk '{print $1}' /systemdecrypt/crypttab)
# crypttab_resolve_source (from cryptsetup functions) resolves UUID=/PARTUUID=
# style sources to a device path in CRYPTTAB_SOURCE.
crypttab_resolve_source
/lib/systemd/systemd-cryptsetup attach "${CRYPTTAB_NAME}" "${CRYPTTAB_SOURCE}" none tpm2-device=auto
}
systemdecryptnow

View File

@@ -0,0 +1,26 @@
#!/bin/sh
# initramfs-tools hook: stage systemd-cryptsetup and its TPM2 support
# files into the initramfs so the local-top systemdecrypt script can
# unlock the root LUKS volume at boot.
case "$1" in
prereqs)
# initramfs-tools prereqs protocol: no prerequisites for this hook.
echo
exit 0
;;
esac
. /usr/share/initramfs-tools/hook-functions
mkdir -p $DESTDIR/systemdecrypt
copy_exec /lib/systemd/systemd-cryptsetup /lib/systemd
# Copy the TSS2 libraries; presumably required at runtime by
# systemd-cryptsetup for TPM2 access -- confirm against target distro.
for i in /lib/x86_64-linux-gnu/libtss2*
do
copy_exec ${i} /lib/x86_64-linux-gnu
done
# Newer cryptsetup ships TPM2 token support as a plugin; include it when present.
if [ -f /lib/x86_64-linux-gnu/cryptsetup/libcryptsetup-token-systemd-tpm2.so ]; then
mkdir -p $DESTDIR/lib/x86_64-linux-gnu/cryptsetup
copy_exec /lib/x86_64-linux-gnu/cryptsetup/libcryptsetup-token-systemd-tpm2.so /lib/x86_64-linux-gnu/cryptsetup
fi
mkdir -p $DESTDIR/scripts/local-top
# Register the boot script with the local-top run order.
echo /scripts/local-top/systemdecrypt >> $DESTDIR/scripts/local-top/ORDER
# Take ownership of the crypttab staged by the stock cryptroot hook, so the
# systemdecrypt boot script (not cryptroot) handles the unlock.
if [ -f $DESTDIR/cryptroot/crypttab ]; then
mv $DESTDIR/cryptroot/crypttab $DESTDIR/systemdecrypt/crypttab
fi

View File

@@ -24,6 +24,9 @@ import eventlet
import greenlet
import pwd
import signal
import confluent.collective.manager as collective
import confluent.noderange as noderange
def fprint(txt):
sys.stdout.write(txt)
@@ -258,6 +261,9 @@ if __name__ == '__main__':
uuid = rsp.get('id.uuid', {}).get('value', None)
if uuid:
uuidok = True
if 'collective.managercandidates' in rsp:
# Check if current node in candidates
pass
if 'deployment.useinsecureprotocols' in rsp:
insec = rsp.get('deployment.useinsecureprotocols', {}).get('value', None)
if insec != 'firmware':
@@ -276,8 +282,27 @@ if __name__ == '__main__':
switch_value = rsp[key].get('value',None)
if switch_value and switch_value not in valid_nodes:
emprint(f'{switch_value} is not a valid node name (as referenced by attribute "{key}" of node {args.node}).')
print(f"Checking network configuration for {args.node}")
cfg = configmanager.ConfigManager(None)
cfd = cfg.get_node_attributes(
args.node, ('deployment.*', 'collective.managercandidates'))
profile = cfd.get(args.node, {}).get(
'deployment.pendingprofile', {}).get('value', None)
if not profile:
emprint(
f'{args.node} is not currently set to deploy any '
'profile, network boot attempts will be ignored')
candmgrs = cfd.get(args.node, {}).get(
'collective.managercandidates', {}).get('value', None)
if candmgrs:
try:
candmgrs = noderange.NodeRange(candmgrs, cfg).nodes
except Exception: # fallback to unverified noderange
candmgrs = noderange.NodeRange(candmgrs).nodes
if collective.get_myname() not in candmgrs:
emprint(f'{args.node} has deployment restricted to '
'certain collective managers excluding the '
'system running the selfcheck')
print(f"Checking network configuration for {args.node}")
bootablev4nics = []
bootablev6nics = []
targsships = []
@@ -298,7 +323,7 @@ if __name__ == '__main__':
print('{} appears to have networking configuration suitable for IPv6 deployment via: {}'.format(args.node, ",".join(bootablev6nics)))
else:
emprint(f"{args.node} may not have any viable IP network configuration (check name resolution (DNS or hosts file) "
"and/or net.*ipv4_address, and verify that the deployment serer addresses and subnet mask/prefix length are accurate)")
"and/or net.*ipv4_address, and verify that the deployment server addresses and subnet mask/prefix length are accurate)")
if not uuidok and not macok:
allok = False
emprint(f'{args.node} does not have a uuid or mac address defined in id.uuid or net.*hwaddr, deployment will not work (Example resolution: nodeinventory {args.node} -s)')

View File

@@ -1,7 +1,7 @@
#!/usr/bin/python2
#!/usr/bin/python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
# Copyright 2017,2024 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -30,7 +30,7 @@ import confluent.config.conf as conf
import confluent.main as main
argparser = optparse.OptionParser(
usage="Usage: %prog [options] [dump|restore] [path]")
usage="Usage: %prog [options] [dump|restore|merge] [path]")
argparser.add_option('-p', '--password',
help='Password to use to protect/unlock a protected dump')
argparser.add_option('-i', '--interactivepassword', help='Prompt for password',
@@ -51,13 +51,13 @@ argparser.add_option('-s', '--skipkeys', action='store_true',
'data is needed. keys do not change and as such '
'they do not require incremental backup')
(options, args) = argparser.parse_args()
if len(args) != 2 or args[0] not in ('dump', 'restore'):
if len(args) != 2 or args[0] not in ('dump', 'restore', 'merge'):
argparser.print_help()
sys.exit(1)
dumpdir = args[1]
if args[0] == 'restore':
if args[0] in ('restore', 'merge'):
pid = main.is_running()
if pid is not None:
print("Confluent is running, must shut down to restore db")
@@ -69,9 +69,22 @@ if args[0] == 'restore':
if options.interactivepassword:
password = getpass.getpass('Enter password to restore backup: ')
try:
cfm.init(True)
cfm.statelessmode = True
cfm.restore_db_from_directory(dumpdir, password)
stateless = args[0] == 'restore'
cfm.init(stateless)
cfm.statelessmode = stateless
skipped = {'nodes': [], 'nodegroups': []}
cfm.restore_db_from_directory(
dumpdir, password,
merge="skip" if args[0] == 'merge' else False, skipped=skipped)
if skipped['nodes']:
skippedn = ','.join(skipped['nodes'])
print('The following nodes were skipped during merge: '
'{}'.format(skippedn))
if skipped['nodegroups']:
skippedn = ','.join(skipped['nodegroups'])
print('The following node groups were skipped during merge: '
'{}'.format(skippedn))
cfm.statelessmode = False
cfm.ConfigManager.wait_for_sync(True)
if owner != 0:

View File

@@ -17,7 +17,9 @@ cd /tmp/confluent/$PKGNAME
if [ -x ./makeman ]; then
./makeman
fi
./makesetup
sed -e 's/~/./' ./makesetup > ./makesetup.deb
chmod +x ./makesetup.deb
./makesetup.deb
VERSION=`cat VERSION`
cat > setup.cfg << EOF
[install]
@@ -35,8 +37,10 @@ cd deb_dist/!(*.orig)/
if [ "$OPKGNAME" = "confluent-server" ]; then
if grep wheezy /etc/os-release; then
sed -i 's/^\(Depends:.*\)/\1, python-confluent-client, python-lxml, python-eficompressor, python-pycryptodomex, python-dateutil, python-pyopenssl, python-msgpack/' debian/control
elif grep jammy /etc/os-release; then
sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi(>=1.5.71), python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd, python3-netifaces, python3-yaml, python3-dateutil/' debian/control
else
sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi, python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd, python3-netifaces, python3-yaml, python3-dateutil, python3-pyasyncore/' debian/control
sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi(>=1.5.71), python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd, python3-netifaces, python3-yaml, python3-dateutil, python3-pyasyncore/' debian/control
fi
if grep wheezy /etc/os-release; then
echo 'confluent_client python-confluent-client' >> debian/pydist-overrides
@@ -72,7 +76,7 @@ else
rm -rf $PKGNAME.egg-info dist setup.py
rm -rf $(find deb_dist -mindepth 1 -maxdepth 1 -type d)
if [ ! -z "$1" ]; then
mv deb_dist/* $1/
mv deb_dist/*.deb $1/
fi
fi
exit 0

View File

@@ -58,7 +58,7 @@ _allowedbyrole = {
'/nodes/',
'/node*/media/uploads/',
'/node*/inventory/firmware/updates/*',
'/node*/suppport/servicedata*',
'/node*/support/servicedata*',
'/node*/attributes/expression',
'/nodes/*/console/session*',
'/nodes/*/shell/sessions*',

View File

@@ -76,7 +76,7 @@ def get_certificate_paths():
continue
kploc = check_apache_config(os.path.join(currpath,
fname))
if keypath and kploc[0]:
if keypath and kploc[0] and keypath != kploc[0]:
return None, None # Ambiguous...
if kploc[0]:
keypath, certpath = kploc

View File

@@ -469,9 +469,13 @@ node = {
'net.interface_names': {
'description': 'Interface name or comma delimited list of names to match for this interface. It is generally recommended '
'to leave this blank unless needing to set up interfaces that are not on a common subnet with a confluent server, '
'as confluent servers provide autodetection for matching the correct network definition to an interface.'
'as confluent servers provide autodetection for matching the correct network definition to an interface. '
'This would be the default name per the deployed OS and can be a comma delimited list to denote members of '
'a team'
'a team or a single interface for VLAN/PKEY connections.'
},
'net.vlan_id': {
'description': 'Ethernet VLAN or InfiniBand PKEY to use for this connection. '
'Specify the parent device using net.interface_names.'
},
'net.ipv4_address': {
'description': 'When configuring static, use this address. If '

View File

@@ -1903,7 +1903,7 @@ class ConfigManager(object):
def add_group_attributes(self, attribmap):
self.set_group_attributes(attribmap, autocreate=True)
def set_group_attributes(self, attribmap, autocreate=False):
def set_group_attributes(self, attribmap, autocreate=False, merge="replace", keydata=None, skipped=None):
for group in attribmap:
curr = attribmap[group]
for attrib in curr:
@@ -1924,11 +1924,11 @@ class ConfigManager(object):
if cfgstreams:
exec_on_followers('_rpc_set_group_attributes', self.tenant,
attribmap, autocreate)
self._true_set_group_attributes(attribmap, autocreate)
self._true_set_group_attributes(attribmap, autocreate, merge=merge, keydata=keydata, skipped=skipped)
def _true_set_group_attributes(self, attribmap, autocreate=False):
def _true_set_group_attributes(self, attribmap, autocreate=False, merge="replace", keydata=None, skipped=None):
changeset = {}
for group in attribmap:
for group in list(attribmap):
if group == '':
raise ValueError('"{0}" is not a valid group name'.format(
group))
@@ -1941,6 +1941,11 @@ class ConfigManager(object):
group))
if not autocreate and group not in self._cfgstore['nodegroups']:
raise ValueError("{0} group does not exist".format(group))
if merge == 'skip' and group in self._cfgstore['nodegroups']:
if skipped is not None:
skipped.append(group)
del attribmap[group]
continue
for attr in list(attribmap[group]):
# first do a pass to normalize out any aliased attribute names
if attr in _attraliases:
@@ -2015,6 +2020,9 @@ class ConfigManager(object):
newdict = {'value': attribmap[group][attr]}
else:
newdict = attribmap[group][attr]
if keydata and attr.startswith('secret.') and 'cryptvalue' in newdict:
newdict['value'] = decrypt_value(newdict['cryptvalue'], keydata['cryptkey'], keydata['integritykey'])
del newdict['cryptvalue']
if 'value' in newdict and attr.startswith("secret."):
newdict['cryptvalue'] = crypt_value(newdict['value'])
del newdict['value']
@@ -2349,7 +2357,7 @@ class ConfigManager(object):
def set_node_attributes(self, attribmap, autocreate=False):
def set_node_attributes(self, attribmap, autocreate=False, merge="replace", keydata=None, skipped=None):
for node in attribmap:
curr = attribmap[node]
for attrib in curr:
@@ -2370,9 +2378,9 @@ class ConfigManager(object):
if cfgstreams:
exec_on_followers('_rpc_set_node_attributes',
self.tenant, attribmap, autocreate)
self._true_set_node_attributes(attribmap, autocreate)
self._true_set_node_attributes(attribmap, autocreate, merge, keydata, skipped)
def _true_set_node_attributes(self, attribmap, autocreate):
def _true_set_node_attributes(self, attribmap, autocreate, merge="replace", keydata=None, skipped=None):
# TODO(jbjohnso): multi mgr support, here if we have peers,
# pickle the arguments and fire them off in eventlet
# flows to peers, all should have the same result
@@ -2380,7 +2388,7 @@ class ConfigManager(object):
changeset = {}
# first do a sanity check of the input upfront
# this mitigates risk of arguments being partially applied
for node in attribmap:
for node in list(attribmap):
node = confluent.util.stringify(node)
if node == '':
raise ValueError('"{0}" is not a valid node name'.format(node))
@@ -2393,6 +2401,11 @@ class ConfigManager(object):
'"{0}" is not a valid node name'.format(node))
if autocreate is False and node not in self._cfgstore['nodes']:
raise ValueError("node {0} does not exist".format(node))
if merge == "skip" and node in self._cfgstore['nodes']:
del attribmap[node]
if skipped is not None:
skipped.append(node)
continue
if 'groups' not in attribmap[node] and node not in self._cfgstore['nodes']:
attribmap[node]['groups'] = []
for attrname in list(attribmap[node]):
@@ -2463,6 +2476,9 @@ class ConfigManager(object):
# add check here, skip None attributes
if newdict is None:
continue
if keydata and attrname.startswith('secret.') and 'cryptvalue' in newdict:
newdict['value'] = decrypt_value(newdict['cryptvalue'], keydata['cryptkey'], keydata['integritykey'])
del newdict['cryptvalue']
if 'value' in newdict and attrname.startswith("secret."):
newdict['cryptvalue'] = crypt_value(newdict['value'])
del newdict['value']
@@ -2503,19 +2519,21 @@ class ConfigManager(object):
self._bg_sync_to_file()
#TODO: wait for synchronization to suceed/fail??)
def _load_from_json(self, jsondata, sync=True):
def _load_from_json(self, jsondata, sync=True, merge=False, keydata=None, skipped=None):
self.inrestore = True
try:
self._load_from_json_backend(jsondata, sync=True)
self._load_from_json_backend(jsondata, sync=True, merge=merge, keydata=keydata, skipped=skipped)
finally:
self.inrestore = False
def _load_from_json_backend(self, jsondata, sync=True):
def _load_from_json_backend(self, jsondata, sync=True, merge=False, keydata=None, skipped=None):
"""Load fresh configuration data from jsondata
:param jsondata: String of jsondata
:return:
"""
if not skipped:
skipped = {'nodes': None, 'nodegroups': None}
dumpdata = json.loads(jsondata)
tmpconfig = {}
for confarea in _config_areas:
@@ -2563,20 +2581,27 @@ class ConfigManager(object):
pass
# Now we have to iterate through each fixed up element, using the
# set attribute to flesh out inheritence and expressions
_cfgstore['main']['idmap'] = {}
if (not merge) or _cfgstore.get('main', {}).get('idmap', None) is None:
_cfgstore['main']['idmap'] = {}
attribmerge = merge if merge else "replace"
for confarea in _config_areas:
self._cfgstore[confarea] = {}
if not merge or confarea not in self._cfgstore:
self._cfgstore[confarea] = {}
if confarea not in tmpconfig:
continue
if confarea == 'nodes':
self.set_node_attributes(tmpconfig[confarea], True)
self.set_node_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata, skipped=skipped['nodes'])
elif confarea == 'nodegroups':
self.set_group_attributes(tmpconfig[confarea], True)
self.set_group_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata, skipped=skipped['nodegroups'])
elif confarea == 'usergroups':
if merge:
continue
for usergroup in tmpconfig[confarea]:
role = tmpconfig[confarea][usergroup].get('role', 'Administrator')
self.create_usergroup(usergroup, role=role)
elif confarea == 'users':
if merge:
continue
for user in tmpconfig[confarea]:
ucfg = tmpconfig[confarea][user]
uid = ucfg.get('id', None)
@@ -2647,7 +2672,7 @@ class ConfigManager(object):
dumpdata[confarea][element][attribute]['cryptvalue'] = '!'.join(cryptval)
elif isinstance(dumpdata[confarea][element][attribute], set):
dumpdata[confarea][element][attribute] = \
list(dumpdata[confarea][element][attribute])
confluent.util.natural_sort(list(dumpdata[confarea][element][attribute]))
return json.dumps(
dumpdata, sort_keys=True, indent=4, separators=(',', ': '))
@@ -2876,7 +2901,7 @@ def _restore_keys(jsond, password, newpassword=None, sync=True):
newpassword = keyfile.read()
set_global('master_privacy_key', _format_key(cryptkey,
password=newpassword), sync)
if integritykey:
if integritykey:
set_global('master_integrity_key', _format_key(integritykey,
password=newpassword), sync)
_masterkey = cryptkey
@@ -2911,35 +2936,46 @@ def _dump_keys(password, dojson=True):
return keydata
def restore_db_from_directory(location, password):
def restore_db_from_directory(location, password, merge=False, skipped=None):
kdd = None
try:
with open(os.path.join(location, 'keys.json'), 'r') as cfgfile:
keydata = cfgfile.read()
json.loads(keydata)
_restore_keys(keydata, password)
kdd = json.loads(keydata)
if merge:
if 'cryptkey' in kdd:
kdd['cryptkey'] = _parse_key(kdd['cryptkey'], password)
if 'integritykey' in kdd:
kdd['integritykey'] = _parse_key(kdd['integritykey'], password)
else:
kdd['integritykey'] = None # GCM
else:
kdd = None
_restore_keys(keydata, password)
except IOError as e:
if e.errno == 2:
raise Exception("Cannot restore without keys, this may be a "
"redacted dump")
try:
moreglobals = json.load(open(os.path.join(location, 'globals.json')))
for globvar in moreglobals:
set_global(globvar, moreglobals[globvar])
except IOError as e:
if e.errno != 2:
raise
try:
collective = json.load(open(os.path.join(location, 'collective.json')))
_cfgstore['collective'] = {}
for coll in collective:
add_collective_member(coll, collective[coll]['address'],
collective[coll]['fingerprint'])
except IOError as e:
if e.errno != 2:
raise
if not merge:
try:
moreglobals = json.load(open(os.path.join(location, 'globals.json')))
for globvar in moreglobals:
set_global(globvar, moreglobals[globvar])
except IOError as e:
if e.errno != 2:
raise
try:
collective = json.load(open(os.path.join(location, 'collective.json')))
_cfgstore['collective'] = {}
for coll in collective:
add_collective_member(coll, collective[coll]['address'],
collective[coll]['fingerprint'])
except IOError as e:
if e.errno != 2:
raise
with open(os.path.join(location, 'main.json'), 'r') as cfgfile:
cfgdata = cfgfile.read()
ConfigManager(tenant=None)._load_from_json(cfgdata)
ConfigManager(tenant=None)._load_from_json(cfgdata, merge=merge, keydata=kdd, skipped=skipped)
ConfigManager.wait_for_sync(True)

View File

@@ -70,10 +70,11 @@ import os
import eventlet.green.socket as socket
import struct
import sys
import uuid
import yaml
pluginmap = {}
dispatch_plugins = (b'ipmi', u'ipmi', b'redfish', u'redfish', b'tsmsol', u'tsmsol', b'geist', u'geist', b'deltapdu', u'deltapdu', b'eatonpdu', u'eatonpdu', b'affluent', u'affluent', b'cnos', u'cnos')
dispatch_plugins = (b'ipmi', u'ipmi', b'redfish', u'redfish', b'tsmsol', u'tsmsol', b'geist', u'geist', b'deltapdu', u'deltapdu', b'eatonpdu', u'eatonpdu', b'affluent', u'affluent', b'cnos', u'cnos', b'enos', u'enos')
PluginCollection = plugin.PluginCollection
@@ -161,8 +162,9 @@ def _merge_dict(original, custom):
rootcollections = ['deployment/', 'discovery/', 'events/', 'networking/',
'noderange/', 'nodes/', 'nodegroups/', 'storage/', 'usergroups/' ,
'users/', 'uuid', 'version']
'noderange/', 'nodes/', 'nodegroups/', 'storage/', 'usergroups/',
'users/', 'uuid', 'version', 'staging/']
class PluginRoute(object):
@@ -358,6 +360,10 @@ def _init_core():
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'extra_advanced': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
},
},
'storage': {
@@ -426,6 +432,7 @@ def _init_core():
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'ikvm': PluginRoute({'handler': 'ikvm'}),
},
'description': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
@@ -1262,6 +1269,87 @@ def handle_discovery(pathcomponents, operation, configmanager, inputdata):
if pathcomponents[0] == 'detected':
pass
class Staging:
    """Per-upload staging area under /var/lib/confluent/client_assets.

    Each instance represents one staged file upload, identified by a
    UUID string that also names the on-disk directory holding the data.
    """

    def __init__(self, user, uuid):
        # uuid is a string; it doubles as the directory name for this upload
        self.uuid_str = uuid
        self.storage_folder = '/var/lib/confluent/client_assets/' + self.uuid_str
        self.filename = None
        self.user = user
        # makedirs with exist_ok avoids the check-then-create race of the
        # previous os.path.exists + os.mkdir sequence, and preserves the
        # original traceback if creation genuinely fails.
        os.makedirs('/var/lib/confluent/client_assets/', exist_ok=True)
        self.base_folder = True

    def getUUID(self):
        """Return the unique identifier for this staging area."""
        return self.uuid_str

    def get_push_url(self):
        """Return the relative API path clients push file content to."""
        return 'staging/{0}/{1}'.format(self.user, self.uuid_str)

    def create_directory(self):
        """Create this upload's private directory; True on success.

        Raises exc.InvalidArgumentException if the directory cannot be
        created (e.g. it already exists).
        """
        try:
            os.mkdir(self.storage_folder)
            return True
        except OSError as e:
            raise exc.InvalidArgumentException(str(e))

    def get_file_name(self):
        """Consume filename.txt and return the full upload target path.

        Returns False if no filename was recorded (nothing staged yet).
        """
        stage_file = '{}/filename.txt'.format(self.storage_folder)
        try:
            with open(stage_file, 'r') as f:
                filename = f.readline()
            os.remove(stage_file)
            return self.storage_folder + '/{}'.format(filename)
        except FileNotFoundError:
            return False

    def deldirectory(self):
        # TODO: cleanup of abandoned staging directories is not implemented
        pass
def handle_staging(pathcomponents, operation, configmanager, inputdata):
    '''
    Handle the /staging API tree for client file uploads.

    e.g push_url: /confluent-api/staging/user/<unique_id>

    On create with a bare ['staging'] path, allocate a new staging
    directory, record the intended filename, and yield the push URL.
    On create with ['staging', user, uuid], stream the request body into
    the previously recorded file, yielding progress messages as it goes.
    '''
    if operation == 'create':
        if len(pathcomponents) == 1:
            # Phase 1: reserve a staging directory keyed by a fresh UUID
            stage = Staging(inputdata['user'],str(uuid.uuid1()))
            if stage.create_directory():
                if 'filename' in inputdata:
                    # Persist the target filename for the later push phase
                    data_file = stage.storage_folder + '/filename.txt'
                    with open(data_file, 'w') as f:
                        f.write(inputdata['filename'])
                else:
                    raise Exception('Error: Missing filename arg')
                push_url = stage.get_push_url()
                yield msg.CreatedResource(push_url)
        elif len(pathcomponents) == 3:
            # Phase 2: receive the actual file content for an existing stage
            stage = Staging(pathcomponents[1], pathcomponents[2])
            file = stage.get_file_name()
            if 'filedata' in inputdata and file:
                content_length = inputdata['content_length']
                remaining_length = content_length
                filedata = inputdata['filedata']
                chunk_size = 16384
                progress = 0.0
                # Stream from the WSGI input in bounded chunks so large
                # uploads never need to be held in memory at once.
                with open(file, 'wb') as f:
                    while remaining_length > 0:
                        progress = (1 - (remaining_length/content_length)) * 100
                        datachunk = filedata['wsgi.input'].read(min(chunk_size, remaining_length))
                        f.write(datachunk)
                        remaining_length -= len(datachunk)
                        yield msg.FileUploadProgress(progress)
                yield msg.FileUploadProgress(100)
    elif operation == 'retrieve':
        # NOTE(review): retrieval of staged files is not implemented yet
        pass
    return
def handle_path(path, operation, configmanager, inputdata=None, autostrip=True):
"""Given a full path request, return an object.
@@ -1370,5 +1458,7 @@ def handle_path(path, operation, configmanager, inputdata=None, autostrip=True):
elif pathcomponents[0] == 'discovery':
return handle_discovery(pathcomponents[1:], operation, configmanager,
inputdata)
elif pathcomponents[0] == 'staging':
return handle_staging(pathcomponents, operation, configmanager, inputdata)
else:
raise exc.NotFoundException()

View File

@@ -74,6 +74,8 @@ import confluent.discovery.handlers.tsm as tsm
import confluent.discovery.handlers.pxe as pxeh
import confluent.discovery.handlers.smm as smm
import confluent.discovery.handlers.xcc as xcc
import confluent.discovery.handlers.xcc3 as xcc3
import confluent.discovery.handlers.megarac as megarac
import confluent.exceptions as exc
import confluent.log as log
import confluent.messages as msg
@@ -113,6 +115,8 @@ nodehandlers = {
'service:lenovo-smm': smm,
'service:lenovo-smm2': smm,
'lenovo-xcc': xcc,
'lenovo-xcc3': xcc3,
'megarac-bmc': megarac,
'service:management-hardware.IBM:integrated-management-module2': imm,
'pxe-client': pxeh,
'onie-switch': None,
@@ -132,6 +136,8 @@ servicenames = {
'service:lenovo-smm2': 'lenovo-smm2',
'affluent-switch': 'affluent-switch',
'lenovo-xcc': 'lenovo-xcc',
'lenovo-xcc3': 'lenovo-xcc3',
'megarac-bmc': 'megarac-bmc',
#'openbmc': 'openbmc',
'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2',
'service:io-device.Lenovo:management-module': 'lenovo-switch',
@@ -147,6 +153,8 @@ servicebyname = {
'lenovo-smm2': 'service:lenovo-smm2',
'affluent-switch': 'affluent-switch',
'lenovo-xcc': 'lenovo-xcc',
'lenovo-xcc3': 'lenovo-xcc3',
'megarac-bmc': 'megarac-bmc',
'lenovo-imm2': 'service:management-hardware.IBM:integrated-management-module2',
'lenovo-switch': 'service:io-device.Lenovo:management-module',
'thinkagile-storage': 'service:thinkagile-storagebmc',
@@ -453,7 +461,7 @@ def iterate_addrs(addrs, countonly=False):
yield 1
return
yield addrs
def _parameterize_path(pathcomponents):
listrequested = False
childcoll = True
@@ -542,7 +550,7 @@ def handle_api_request(configmanager, inputdata, operation, pathcomponents):
if len(pathcomponents) > 2:
raise Exception('TODO')
currsubs = get_subscriptions()
return [msg.ChildCollection(x) for x in currsubs]
return [msg.ChildCollection(x) for x in currsubs]
elif operation == 'retrieve':
return handle_read_api_request(pathcomponents)
elif (operation in ('update', 'create') and
@@ -1703,3 +1711,4 @@ if __name__ == '__main__':
start_detection()
while True:
eventlet.sleep(30)

View File

@@ -0,0 +1,51 @@
# Copyright 2024 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import confluent.discovery.handlers.redfishbmc as redfishbmc
import eventlet.support.greendns
getaddrinfo = eventlet.support.greendns.getaddrinfo
class NodeHandler(redfishbmc.NodeHandler):
    """MegaRAC BMC discovery handler.

    All onboarding logic lives in the generic redfishbmc base; this
    subclass only supplies the MegaRAC factory-default credentials.
    """
    def get_firmware_default_account_info(self):
        """Return the (username, password) pair shipped from the factory."""
        return ('admin', 'admin')
def remote_nodecfg(nodename, cfm):
    """Configure a remote MegaRAC BMC identified by its confluent node name.

    Looks up hardwaremanagement.manager for the node, resolves it, and
    runs the standard NodeHandler.config onboarding flow against it.

    Raises Exception if the node has no known management address.
    """
    cfg = cfm.get_node_attributes(
        nodename, 'hardwaremanagement.manager')
    ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get(
        'value', None)
    # Check for a missing address before touching it; the previous order
    # raised AttributeError on None instead of the intended message.
    if not ipaddr:
        raise Exception('Cannot remote configure a system without known '
                        'address')
    ipaddr = ipaddr.split('/', 1)[0]
    ipaddr = getaddrinfo(ipaddr, 0)[0][-1]
    info = {'addresses': [ipaddr]}
    nh = NodeHandler(info, cfm)
    nh.config(nodename)
if __name__ == '__main__':
import confluent.config.configmanager as cfm
c = cfm.ConfigManager(None)
import sys
info = {'addresses': [[sys.argv[1]]]}
print(repr(info))
testr = NodeHandler(info, c)
testr.config(sys.argv[2])

View File

@@ -0,0 +1,321 @@
# Copyright 2024 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import confluent.discovery.handlers.generic as generic
import confluent.exceptions as exc
import confluent.netutil as netutil
import confluent.util as util
import eventlet
import eventlet.support.greendns
import json
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
getaddrinfo = eventlet.support.greendns.getaddrinfo
webclient = eventlet.import_patched('pyghmi.util.webclient')
def get_host_interface_urls(wc, mginfo):
    """Collect the manager-side Ethernet interface URLs of all host interfaces.

    Walks the Manager's HostInterfaces collection and gathers each member's
    ManagerEthernetInterface link. Returns None when the Manager resource
    exposes no HostInterfaces collection at all; otherwise a (possibly
    empty) list of URLs.
    """
    collection_url = mginfo.get('HostInterfaces', {}).get('@odata.id', None)
    if not collection_url:
        return None
    members = wc.grab_json_response(collection_url).get('Members', [])
    ethurls = []
    for member in members:
        detail = wc.grab_json_response(member['@odata.id'])
        ethurl = detail.get('ManagerEthernetInterface', {}).get('@odata.id', None)
        if ethurl:
            ethurls.append(ethurl)
    return ethurls
class NodeHandler(generic.NodeHandler):
    """Generic Redfish BMC discovery/onboarding handler.

    Drives the transition of a BMC from factory-default credentials to
    the confluent-configured account, optionally enabling IPMI and
    setting a static management IP. Subclasses supply the vendor's
    factory default credentials via get_firmware_default_account_info.
    """
    devname = 'BMC'
    def __init__(self, info, configmanager):
        # Credential negotiation state: curr* are the credentials known to
        # currently work, targ* are the ones confluent wants in place.
        self.trieddefault = None
        self.targuser = None
        self.curruser = None
        self.currpass = None
        self.targpass = None
        self.nodename = None
        self.csrftok = None
        self.channel = None
        self.atdefault = True
        # Lazily-populated caches of the Redfish service root and Manager
        self._srvroot = None
        self._mgrinfo = None
        super(NodeHandler, self).__init__(info, configmanager)
    def srvroot(self, wc):
        """Return the cached Redfish service root document (fetch once)."""
        if not self._srvroot:
            srvroot, status = wc.grab_json_response_with_status('/redfish/v1/')
            if status == 200:
                self._srvroot = srvroot
        return self._srvroot
    def mgrinfo(self, wc):
        """Return the cached Manager resource; requires exactly one Manager."""
        if not self._mgrinfo:
            mgrs = self.srvroot(wc)['Managers']['@odata.id']
            rsp = wc.grab_json_response(mgrs)
            if len(rsp['Members']) != 1:
                raise Exception("Can not handle multiple Managers")
            mgrurl = rsp['Members'][0]['@odata.id']
            self._mgrinfo = wc.grab_json_response(mgrurl)
        return self._mgrinfo
    def get_firmware_default_account_info(self):
        """Subclass hook: return the vendor's factory (user, password)."""
        raise Exception('This must be subclassed')
    def scan(self):
        # Probe the service root anonymously to pick up the system UUID
        c = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert)
        i = c.grab_json_response('/redfish/v1/')
        uuid = i.get('UUID', None)
        if uuid:
            self.info['uuid'] = uuid.lower()
    def validate_cert(self, certificate):
        # broadly speaking, merely checks consistency moment to moment,
        # but if https_cert gets stricter, this check means something
        fprint = util.get_fingerprint(self.https_cert)
        return util.cert_matches(fprint, certificate)
    def enable_ipmi(self, wc):
        """Turn on the IPMI network protocol and grant the current account
        the IPMI account type (re-supplying the password as Redfish
        requires when changing AccountTypes)."""
        npu = self.mgrinfo(wc).get(
            'NetworkProtocol', {}).get('@odata.id', None)
        if not npu:
            raise Exception('Cannot enable IPMI, no NetworkProtocol on BMC')
        npi = wc.grab_json_response(npu)
        if not npi.get('IPMI', {}).get('ProtocolEnabled'):
            wc.set_header('If-Match', '*')
            wc.grab_json_response_with_status(
                npu, {'IPMI': {'ProtocolEnabled': True}}, method='PATCH')
        acctinfo = wc.grab_json_response_with_status(
            self.target_account_url(wc))
        acctinfo = acctinfo[0]
        actypes = acctinfo['AccountTypes']
        candidates = acctinfo['AccountTypes@Redfish.AllowableValues']
        if 'IPMI' not in actypes and 'IPMI' in candidates:
            actypes.append('IPMI')
            acctupd = {
                'AccountTypes': actypes,
                'Password': self.currpass,
            }
            rsp = wc.grab_json_response_with_status(
                self.target_account_url(wc), acctupd, method='PATCH')
    def _get_wc(self):
        """Return an authenticated web client, negotiating credentials.

        Tries the factory default first; handles the Redfish
        PasswordChangeRequired flow; falls back to the default user with
        the target password, then the stored target credentials. Returns
        None if no credential combination is accepted.
        """
        defuser, defpass = self.get_firmware_default_account_info()
        wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert)
        wc.set_basic_credentials(defuser, defpass)
        wc.set_header('Content-Type', 'application/json')
        wc.set_header('Accept', 'application/json')
        authmode = 0
        if not self.trieddefault:
            rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
            if status == 403:
                # 403 with PasswordChangeRequired: BMC demands an immediate
                # password change before any other operation
                self.trieddefault = True
                chgurl = None
                rsp = json.loads(rsp)
                currerr = rsp.get('error', {})
                ecode = currerr.get('code', None)
                # NOTE(review): ecode may be None here, which would raise
                # AttributeError on endswith — confirm upstream intent
                if ecode.endswith('PasswordChangeRequired'):
                    for einfo in currerr.get('@Message.ExtendedInfo', []):
                        if einfo.get('MessageId', None).endswith('PasswordChangeRequired'):
                            for msgarg in einfo.get('MessageArgs'):
                                chgurl = msgarg
                                break
                if chgurl:
                    if self.targpass == defpass:
                        raise Exception("Must specify a non-default password to onboard this BMC")
                    wc.set_header('If-Match', '*')
                    cpr = wc.grab_json_response_with_status(chgurl, {'Password': self.targpass}, method='PATCH')
                    if cpr[1] >= 200 and cpr[1] < 300:
                        self.curruser = defuser
                        self.currpass = self.targpass
                        wc.set_basic_credentials(self.curruser, self.currpass)
                        _, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
                        tries = 10
                        # NOTE(review): tries is never decremented in this
                        # loop as shown — potential infinite loop if the BMC
                        # keeps returning >= 300; verify against upstream
                        while status >= 300 and tries:
                            eventlet.sleep(1)
                            _, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
                        return wc
            if status > 400:
                self.trieddefault = True
                if status == 401:
                    # Default user may remain but with the target password
                    wc.set_basic_credentials(defuser, self.targpass)
                    rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
                    if status == 200:  # Default user still, but targpass
                        self.currpass = self.targpass
                        self.curruser = defuser
                        return wc
                    elif self.targuser != defuser:
                        wc.set_basic_credentials(self.targuser, self.targpass)
                        rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
                    if status != 200:
                        raise Exception("Target BMC does not recognize firmware default credentials nor the confluent stored credential")
            else:
                # Factory defaults worked as-is
                self.curruser = defuser
                self.currpass = defpass
                return wc
        if self.curruser:
            wc.set_basic_credentials(self.curruser, self.currpass)
            rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
            if status != 200:
                return None
            return wc
        wc.set_basic_credentials(self.targuser, self.targpass)
        rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
        if status != 200:
            return None
        self.curruser = self.targuser
        self.currpass = self.targpass
        return wc
    def target_account_url(self, wc):
        """Find the Redfish Account resource URL matching self.curruser."""
        asrv = self.srvroot(wc).get('AccountService', {}).get('@odata.id')
        rsp, status = wc.grab_json_response_with_status(asrv)
        accts = rsp.get('Accounts', {}).get('@odata.id')
        rsp, status = wc.grab_json_response_with_status(accts)
        accts = rsp.get('Members', [])
        for accturl in accts:
            accturl = accturl.get('@odata.id', '')
            if accturl:
                rsp, status = wc.grab_json_response_with_status(accturl)
                if rsp.get('UserName', None) == self.curruser:
                    targaccturl = accturl
                    break
        else:
            raise Exception("Unable to identify Account URL to modify on this BMC")
        return targaccturl
    def config(self, nodename):
        """Onboard the BMC for nodename: set credentials, optionally enable
        IPMI, and apply the static management IP from confluent config."""
        mgrs = None
        self.nodename = nodename
        creds = self.configmanager.get_node_attributes(
            nodename, ['secret.hardwaremanagementuser',
            'secret.hardwaremanagementpassword',
            'hardwaremanagement.manager',
            'hardwaremanagement.method',
            'console.method'],
            True)
        cd = creds.get(nodename, {})
        defuser, defpass = self.get_firmware_default_account_info()
        user, passwd, _ = self.get_node_credentials(
                nodename, creds, defuser, defpass)
        user = util.stringify(user)
        passwd = util.stringify(passwd)
        self.targuser = user
        self.targpass = passwd
        wc = self._get_wc()
        curruserinfo = {}
        authupdate = {}
        wc.set_header('Content-Type', 'application/json')
        if user != self.curruser:
            authupdate['UserName'] = user
        if passwd != self.currpass:
            authupdate['Password'] = passwd
        if authupdate:
            targaccturl = self.target_account_url(wc)
            rsp, status = wc.grab_json_response_with_status(targaccturl, authupdate, method='PATCH')
            if status >= 300:
                raise Exception("Failed attempting to update credentials on BMC")
            self.curruser = user
            self.currpass = passwd
            wc.set_basic_credentials(user, passwd)
            # BMC may briefly reject the new credentials; poll until accepted
            _, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
            tries = 10
            while tries and status >= 300:
                tries -= 1
                eventlet.sleep(1.0)
                _, status = wc.grab_json_response_with_status(
                    '/redfish/v1/Managers')
        # IPMI is needed unless management and console are both pure redfish
        if (cd.get('hardwaremanagement.method', {}).get('value', 'ipmi') != 'redfish'
                or cd.get('console.method', {}).get('value', None) == 'ipmi'):
            self.enable_ipmi(wc)
        if ('hardwaremanagement.manager' in cd and
                cd['hardwaremanagement.manager']['value'] and
                not cd['hardwaremanagement.manager']['value'].startswith(
                    'fe80::')):
            newip = cd['hardwaremanagement.manager']['value']
            newip = newip.split('/', 1)[0]
            newipinfo = getaddrinfo(newip, 0)[0]
            newip = newipinfo[-1][0]
            if ':' in newip:
                raise exc.NotImplementedException('IPv6 remote config TODO')
            # Exclude host-interface (USB/NCSI) NICs; only a single real
            # management NIC is currently supported
            hifurls = get_host_interface_urls(wc, self.mgrinfo(wc))
            mgtnicinfo = self.mgrinfo(wc)['EthernetInterfaces']['@odata.id']
            mgtnicinfo = wc.grab_json_response(mgtnicinfo)
            mgtnics = [x['@odata.id'] for x in mgtnicinfo.get('Members', [])]
            actualnics = []
            for candnic in mgtnics:
                if candnic in hifurls:
                    continue
                actualnics.append(candnic)
            if len(actualnics) != 1:
                raise Exception("Multi-interface BMCs are not supported currently")
            currnet = wc.grab_json_response(actualnics[0])
            netconfig = netutil.get_nic_config(self.configmanager, nodename, ip=newip)
            newconfig = {
                "Address": newip,
                "SubnetMask": netutil.cidr_to_mask(netconfig['prefix']),
            }
            newgw = netconfig['ipv4_gateway']
            if newgw:
                newconfig['Gateway'] = newgw
            else:
                newconfig['Gateway'] = newip  # required property, set to self just to have a value
            # Skip the PATCH if the BMC already has the desired static config
            for net in currnet.get("IPv4Addresses", []):
                if net["Address"] == newip and net["SubnetMask"] == newconfig['SubnetMask'] and (not newgw or newconfig['Gateway'] == newgw):
                    break
            else:
                wc.set_header('If-Match', '*')
                rsp, status = wc.grab_json_response_with_status(actualnics[0], {
                    'DHCPv4': {'DHCPEnabled': False},
                    'IPv4StaticAddresses': [newconfig]}, method='PATCH')
        elif self.ipaddr.startswith('fe80::'):
            # No routable target address: record the link-local address so
            # the node remains reachable
            self.configmanager.set_node_attributes(
                {nodename: {'hardwaremanagement.manager': self.ipaddr}})
        else:
            raise exc.TargetEndpointUnreachable(
                'hardwaremanagement.manager must be set to desired address (No IPv6 Link Local detected)')
def remote_nodecfg(nodename, cfm):
    """Configure a remote Redfish BMC identified by its confluent node name.

    Looks up hardwaremanagement.manager for the node, resolves it, and
    runs the standard NodeHandler.config onboarding flow against it.

    Raises Exception if the node has no known management address.
    """
    cfg = cfm.get_node_attributes(
        nodename, 'hardwaremanagement.manager')
    ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get(
        'value', None)
    # Check for a missing address before touching it; the previous order
    # raised AttributeError on None instead of the intended message.
    if not ipaddr:
        raise Exception('Cannot remote configure a system without known '
                        'address')
    ipaddr = ipaddr.split('/', 1)[0]
    ipaddr = getaddrinfo(ipaddr, 0)[0][-1]
    info = {'addresses': [ipaddr]}
    nh = NodeHandler(info, cfm)
    nh.config(nodename)
if __name__ == '__main__':
import confluent.config.configmanager as cfm
c = cfm.ConfigManager(None)
import sys
info = {'addresses': [[sys.argv[1]]] }
print(repr(info))
testr = NodeHandler(info, c)
testr.config(sys.argv[2])

View File

@@ -408,6 +408,34 @@ class NodeHandler(immhandler.NodeHandler):
if user['users_user_name'] == '':
return user['users_user_id']
def create_tmp_account(self, wc):
    """Create a throwaway Administrator account on the BMC.

    Used as a fallback identity while the main account is being renamed.
    Generates random passwords, creates the account, immediately rotates
    its password, leaves *wc* authenticated as the temp account, and
    returns the account's Redfish URL so the caller can delete it later.
    """
    acctsurl = '/redfish/v1/AccountService/Accounts'
    listing, status = wc.grab_json_response_with_status(acctsurl)
    if status != 200:
        raise Exception("Unable to list current accounts")
    tmpnam = '6pmu0ezczzcp'
    # Random base plus a literal suffix to satisfy complexity rules
    tpass = base64.b64encode(os.urandom(9)).decode() + 'Iw47$'
    ntpass = base64.b64encode(os.urandom(9)).decode() + 'Iw47$'
    existing = set()
    for member in listing.get("Members", []):
        memberurl = member.get("@odata.id", None)
        if memberurl:
            existing.add(wc.grab_json_response(memberurl).get('UserName', None))
    if tmpnam in existing:
        raise Exception("Tmp account already exists")
    newacct, status = wc.grab_json_response_with_status(
        acctsurl,
        {'UserName': tmpnam, 'Password': tpass, 'RoleId': 'Administrator'})
    if status >= 300:
        raise Exception("Failure creating tmp account: " + repr(newacct))
    tmpurl = newacct['@odata.id']
    wc.set_basic_credentials(tmpnam, tpass)
    # Rotate the password once to clear any change-on-first-use requirement
    wc.grab_json_response_with_status(
        tmpurl, {'Password': ntpass}, method='PATCH')
    wc.set_basic_credentials(tmpnam, ntpass)
    return tmpurl
def _setup_xcc_account(self, username, passwd, wc):
userinfo = wc.grab_json_response('/api/dataset/imm_users')
uid = None
@@ -442,16 +470,29 @@ class NodeHandler(immhandler.NodeHandler):
wc.grab_json_response('/api/providers/logout')
wc.set_basic_credentials(self._currcreds[0], self._currcreds[1])
status = 503
tries = 2
tmpaccount = None
while status != 200:
tries -= 1
rsp, status = wc.grab_json_response_with_status(
'/redfish/v1/AccountService/Accounts/{0}'.format(uid),
{'UserName': username}, method='PATCH')
if status != 200:
rsp = json.loads(rsp)
if rsp.get('error', {}).get('code', 'Unknown') in ('Base.1.8.GeneralError', 'Base.1.12.GeneralError', 'Base.1.14.GeneralError'):
eventlet.sleep(4)
if tries:
eventlet.sleep(4)
elif tmpaccount:
wc.grab_json_response_with_status(tmpaccount, method='DELETE')
raise Exception('Failed renaming main account')
else:
tmpaccount = self.create_tmp_account(wc)
tries = 8
else:
break
if tmpaccount:
wc.set_basic_credentials(username, passwd)
wc.grab_json_response_with_status(tmpaccount, method='DELETE')
self.tmppasswd = None
self._currcreds = (username, passwd)
return
@@ -605,7 +646,10 @@ class NodeHandler(immhandler.NodeHandler):
statargs['ENET_IPv4GatewayIPAddr'] = netconfig['ipv4_gateway']
elif not netutil.address_is_local(newip):
raise exc.InvalidArgumentException('Will not remotely configure a device with no gateway')
wc.grab_json_response('/api/dataset', statargs)
netset, status = wc.grab_json_response_with_status('/api/dataset', statargs)
print(repr(netset))
print(repr(status))
elif self.ipaddr.startswith('fe80::'):
self.configmanager.set_node_attributes(
{nodename: {'hardwaremanagement.manager': self.ipaddr}})
@@ -636,7 +680,7 @@ def remote_nodecfg(nodename, cfm):
ipaddr = ipaddr.split('/', 1)[0]
ipaddr = getaddrinfo(ipaddr, 0)[0][-1]
if not ipaddr:
raise Excecption('Cannot remote configure a system without known '
raise Exception('Cannot remote configure a system without known '
'address')
info = {'addresses': [ipaddr]}
nh = NodeHandler(info, cfm)

View File

@@ -0,0 +1,104 @@
# Copyright 2024 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import confluent.discovery.handlers.redfishbmc as redfishbmc
import eventlet.support.greendns
import confluent.util as util
webclient = eventlet.import_patched('pyghmi.util.webclient')
getaddrinfo = eventlet.support.greendns.getaddrinfo
class NodeHandler(redfishbmc.NodeHandler):
devname = 'XCC'
def get_firmware_default_account_info(self):
return ('USERID', 'PASSW0RD')
def scan(self):
ip, port = self.get_web_port_and_ip()
c = webclient.SecureHTTPConnection(ip, port,
verifycallback=self.validate_cert)
c.set_header('Accept', 'application/json')
i = c.grab_json_response('/api/providers/logoninfo')
modelname = i.get('items', [{}])[0].get('machine_name', None)
if modelname:
self.info['modelname'] = modelname
for attrname in list(self.info.get('attributes', {})):
val = self.info['attributes'][attrname]
if '-uuid' == attrname[-5:] and len(val) == 32:
val = val.lower()
self.info['attributes'][attrname] = '-'.join([val[:8], val[8:12], val[12:16], val[16:20], val[20:]])
attrs = self.info.get('attributes', {})
room = attrs.get('room-id', None)
if room:
self.info['room'] = room
rack = attrs.get('rack-id', None)
if rack:
self.info['rack'] = rack
name = attrs.get('name', None)
if name:
self.info['hostname'] = name
unumber = attrs.get('lowest-u', None)
if unumber:
self.info['u'] = unumber
location = attrs.get('location', None)
if location:
self.info['location'] = location
mtm = attrs.get('enclosure-machinetype-model', None)
if mtm:
self.info['modelnumber'] = mtm.strip()
sn = attrs.get('enclosure-serial-number', None)
if sn:
self.info['serialnumber'] = sn.strip()
if attrs.get('enclosure-form-factor', None) == 'dense-computing':
encuuid = attrs.get('chassis-uuid', None)
if encuuid:
self.info['enclosure.uuid'] = fixuuid(encuuid)
slot = int(attrs.get('slot', 0))
if slot != 0:
self.info['enclosure.bay'] = slot
def validate_cert(self, certificate):
fprint = util.get_fingerprint(self.https_cert)
return util.cert_matches(fprint, certificate)
def remote_nodecfg(nodename, cfm):
    """Remotely configure a node's XCC using its configured manager address.

    :param nodename: node whose BMC should be configured
    :param cfm: configuration manager used to look up
                hardwaremanagement.manager and drive NodeHandler.config
    :raises Exception: if no manager address is known for the node
    """
    cfg = cfm.get_node_attributes(
        nodename, 'hardwaremanagement.manager')
    ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get(
        'value', None)
    # Validate before dereferencing: the original split the value first,
    # which raised AttributeError on None instead of the intended message
    if not ipaddr:
        raise Exception('Cannot remote configure a system without known '
                        'address')
    ipaddr = ipaddr.split('/', 1)[0]  # strip any CIDR suffix
    ipaddr = getaddrinfo(ipaddr, 0)[0][-1]
    info = {'addresses': [ipaddr]}
    nh = NodeHandler(info, cfm)
    nh.config(nodename)
if __name__ == '__main__':
    # Ad-hoc manual test entry point:
    #   argv[1] = BMC IP address to target, argv[2] = node name to configure
    # Imports are deferred to here so importing this module as a discovery
    # handler does not pull in the config manager.
    import confluent.config.configmanager as cfm
    c = cfm.ConfigManager(None)
    import sys
    info = {'addresses': [[sys.argv[1]]]}
    print(repr(info))
    testr = NodeHandler(info, c)
    testr.config(sys.argv[2])

View File

@@ -315,9 +315,9 @@ def proxydhcp(handler, nodeguess):
optidx = rqv.tobytes().index(b'\x63\x82\x53\x63') + 4
except ValueError:
continue
hwlen = rq[2]
opts, disco = opts_to_dict(rq, optidx, 3)
disco['hwaddr'] = ':'.join(['{0:02x}'.format(x) for x in rq[28:28+hwlen]])
hwlen = rqv[2]
opts, disco = opts_to_dict(rqv, optidx, 3)
disco['hwaddr'] = ':'.join(['{0:02x}'.format(x) for x in rqv[28:28+hwlen]])
node = None
if disco.get('hwaddr', None) in macmap:
node = macmap[disco['hwaddr']]
@@ -346,7 +346,7 @@ def proxydhcp(handler, nodeguess):
profile = None
if not myipn:
myipn = socket.inet_aton(recv)
profile = get_deployment_profile(node, cfg)
profile, stgprofile = get_deployment_profile(node, cfg)
if profile:
log.log({
'info': 'Offering proxyDHCP boot from {0} to {1} ({2})'.format(recv, node, client[0])})
@@ -356,7 +356,7 @@ def proxydhcp(handler, nodeguess):
continue
if opts.get(77, None) == b'iPXE':
if not profile:
profile = get_deployment_profile(node, cfg)
profile, stgprofile = get_deployment_profile(node, cfg)
if not profile:
log.log({'info': 'No pending profile for {0}, skipping proxyDHCP reply'.format(node)})
continue
@@ -385,8 +385,9 @@ def proxydhcp(handler, nodeguess):
rpv[268:280] = b'\x3c\x09PXEClient\xff'
net4011.sendto(rpv[:281], client)
except Exception as e:
tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
log.logtrace()
# tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
# event=log.Events.stacktrace)
def start_proxydhcp(handler, nodeguess=None):
@@ -453,13 +454,14 @@ def snoop(handler, protocol=None, nodeguess=None):
# with try/except
if i < 64:
continue
_, level, typ = struct.unpack('QII', cmsgarr[:16])
if level == socket.IPPROTO_IP and typ == IP_PKTINFO:
idx, recv = struct.unpack('II', cmsgarr[16:24])
recv = ipfromint(recv)
rqv = memoryview(rawbuffer)[:i]
if rawbuffer[0] == 1: # Boot request
process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv)
_, level, typ = struct.unpack('QII', cmsgarr[:16])
if level == socket.IPPROTO_IP and typ == IP_PKTINFO:
idx, recv = struct.unpack('II', cmsgarr[16:24])
recv = ipfromint(recv)
rqv = memoryview(rawbuffer)[:i]
client = (ipfromint(clientaddr.sin_addr.s_addr), socket.htons(clientaddr.sin_port))
process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv, client)
elif netc == net6:
recv = 'ff02::1:2'
pkt, addr = netc.recvfrom(2048)
@@ -476,6 +478,10 @@ def snoop(handler, protocol=None, nodeguess=None):
tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
_mac_to_uuidmap = {}
def process_dhcp6req(handler, rqv, addr, net, cfg, nodeguess):
ip = addr[0]
req, disco = v6opts_to_dict(bytearray(rqv[4:]))
@@ -501,7 +507,7 @@ def process_dhcp6req(handler, rqv, addr, net, cfg, nodeguess):
handler(info)
consider_discover(info, req, net, cfg, None, nodeguess, addr)
def process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv):
def process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv, client):
rq = bytearray(rqv)
addrlen = rq[2]
if addrlen > 16 or addrlen == 0:
@@ -531,7 +537,12 @@ def process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv):
# We will fill out service to have something to byte into,
# but the nature of the beast is that we do not have peers,
# so that will not be present for a pxe snoop
info = {'hwaddr': netaddr, 'uuid': disco['uuid'],
theuuid = disco['uuid']
if theuuid:
_mac_to_uuidmap[netaddr] = theuuid
elif netaddr in _mac_to_uuidmap:
theuuid = _mac_to_uuidmap[netaddr]
info = {'hwaddr': netaddr, 'uuid': theuuid,
'architecture': disco['arch'],
'netinfo': {'ifidx': idx, 'recvip': recv, 'txid': txid},
'services': ('pxe-client',)}
@@ -539,7 +550,7 @@ def process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv):
and time.time() > ignoredisco.get(netaddr, 0) + 90):
ignoredisco[netaddr] = time.time()
handler(info)
consider_discover(info, rqinfo, net4, cfg, rqv, nodeguess)
consider_discover(info, rqinfo, net4, cfg, rqv, nodeguess, requestor=client)
@@ -583,29 +594,34 @@ def get_deployment_profile(node, cfg, cfd=None):
if not cfd:
cfd = cfg.get_node_attributes(node, ('deployment.*', 'collective.managercandidates'))
profile = cfd.get(node, {}).get('deployment.pendingprofile', {}).get('value', None)
if not profile:
return None
candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None)
if candmgrs:
try:
candmgrs = noderange.NodeRange(candmgrs, cfg).nodes
except Exception: # fallback to unverified noderange
candmgrs = noderange.NodeRange(candmgrs).nodes
if collective.get_myname() not in candmgrs:
return None
return profile
stgprofile = cfd.get(node, {}).get('deployment.stagedprofile', {}).get('value', None)
if profile or stgprofile:
candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None)
if candmgrs:
try:
candmgrs = noderange.NodeRange(candmgrs, cfg).nodes
except Exception: # fallback to unverified noderange
candmgrs = noderange.NodeRange(candmgrs).nodes
if collective.get_myname() not in candmgrs:
return None, None
return profile, stgprofile
staticassigns = {}
myipbypeer = {}
def check_reply(node, info, packet, sock, cfg, reqview, addr):
httpboot = info['architecture'] == 'uefi-httpboot'
def check_reply(node, info, packet, sock, cfg, reqview, addr, requestor):
if not requestor:
requestor = ('0.0.0.0', None)
if requestor[0] == '0.0.0.0' and not info.get('uuid', None):
return # ignore DHCP from local non-PXE segment
httpboot = info.get('architecture', None) == 'uefi-httpboot'
cfd = cfg.get_node_attributes(node, ('deployment.*', 'collective.managercandidates'))
profile = get_deployment_profile(node, cfg, cfd)
if not profile:
profile, stgprofile = get_deployment_profile(node, cfg, cfd)
if ((not profile)
and (requestor[0] == '0.0.0.0' or not stgprofile)):
if time.time() > ignoremacs.get(info['hwaddr'], 0) + 90:
ignoremacs[info['hwaddr']] = time.time()
log.log({'info': 'Ignoring boot attempt by {0} no deployment profile specified (uuid {1}, hwaddr {2})'.format(
node, info['uuid'], info['hwaddr']
node, info.get('uuid', 'NA'), info['hwaddr']
)})
return
if addr:
@@ -614,7 +630,7 @@ def check_reply(node, info, packet, sock, cfg, reqview, addr):
return
return reply_dhcp6(node, addr, cfg, packet, cfd, profile, sock)
else:
return reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile)
return reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile, sock, requestor)
def reply_dhcp6(node, addr, cfg, packet, cfd, profile, sock):
myaddrs = netutil.get_my_addresses(addr[-1], socket.AF_INET6)
@@ -651,14 +667,16 @@ def reply_dhcp6(node, addr, cfg, packet, cfd, profile, sock):
ipass[4:16] = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x18'
ipass[16:32] = socket.inet_pton(socket.AF_INET6, ipv6addr)
ipass[32:40] = b'\x00\x00\x00\x78\x00\x00\x01\x2c'
elif (not packet['vci']) or not packet['vci'].startswith('HTTPClient:Arch:'):
return # do not send ip-less replies to anything but HTTPClient specifically
#1 msgtype
#3 txid
#22 - server ident
#len(packet[1]) + 4 - client ident
#len(ipass) + 4 or 0
#len(url) + 4
elif (not packet['vci']) or not packet['vci'].startswith(
'HTTPClient:Arch:'):
# do not send ip-less replies to anything but HTTPClient specifically
return
# 1 msgtype
# 3 txid
# 22 - server ident
# len(packet[1]) + 4 - client ident
# len(ipass) + 4 or 0
# len(url) + 4
replylen = 50 + len(bootfile) + len(packet[1]) + 4
if len(ipass):
replylen += len(ipass)
@@ -698,26 +716,31 @@ def get_my_duid():
return _myuuid
def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile):
def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile, sock=None, requestor=None):
replen = 275 # default is going to be 286
# while myipn is describing presumed destination, it's really
# vague in the face of aliases, need to convert to ifidx and evaluate
# aliases for best match to guess
isboot = True
if requestor is None:
requestor = ('0.0.0.0', None)
if info.get('architecture', None) is None:
isboot = False
rqtype = packet[53][0]
insecuremode = cfd.get(node, {}).get('deployment.useinsecureprotocols',
{}).get('value', 'never')
if not insecuremode:
insecuremode = 'never'
if insecuremode == 'never' and not httpboot:
if rqtype == 1 and info['architecture']:
log.log(
{'info': 'Boot attempt by {0} detected in insecure mode, but '
'insecure mode is disabled. Set the attribute '
'`deployment.useinsecureprotocols` to `firmware` or '
'`always` to enable support, or use UEFI HTTP boot '
'with HTTPS.'.format(node)})
return
if isboot:
insecuremode = cfd.get(node, {}).get('deployment.useinsecureprotocols',
{}).get('value', 'never')
if not insecuremode:
insecuremode = 'never'
if insecuremode == 'never' and not httpboot:
if rqtype == 1 and info.get('architecture', None):
log.log(
{'info': 'Boot attempt by {0} detected in insecure mode, but '
'insecure mode is disabled. Set the attribute '
'`deployment.useinsecureprotocols` to `firmware` or '
'`always` to enable support, or use UEFI HTTP boot '
'with HTTPS.'.format(node)})
return
reply = bytearray(512)
repview = memoryview(reply)
repview[:20] = iphdr
@@ -728,9 +751,16 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile):
repview[1:10] = reqview[1:10] # duplicate txid, hwlen, and others
repview[10:11] = b'\x80' # always set broadcast
repview[28:44] = reqview[28:44] # copy chaddr field
relayip = reqview[24:28].tobytes()
if (not isboot) and relayip == b'\x00\x00\x00\x00':
# Ignore local DHCP packets if it isn't a firmware request
return
relayipa = None
if relayip != b'\x00\x00\x00\x00':
relayipa = socket.inet_ntoa(relayip)
gateway = None
netmask = None
niccfg = netutil.get_nic_config(cfg, node, ifidx=info['netinfo']['ifidx'])
niccfg = netutil.get_nic_config(cfg, node, ifidx=info['netinfo']['ifidx'], relayipn=relayip)
nicerr = niccfg.get('error_msg', False)
if nicerr:
log.log({'error': nicerr})
@@ -754,7 +784,7 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile):
gateway = None
netmask = (2**32 - 1) ^ (2**(32 - netmask) - 1)
netmask = struct.pack('!I', netmask)
elif (not packet['vci']) or not (packet['vci'].startswith('HTTPClient:Arch:') or packet['vci'].startswith('PXEClient')):
elif (not packet.get('vci', None)) or not (packet['vci'].startswith('HTTPClient:Arch:') or packet['vci'].startswith('PXEClient')):
return # do not send ip-less replies to anything but netboot specifically
myipn = niccfg['deploy_server']
if not myipn:
@@ -774,9 +804,9 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile):
node, profile, len(bootfile) - 127)})
return
repview[108:108 + len(bootfile)] = bootfile
elif info['architecture'] == 'uefi-aarch64' and packet.get(77, None) == b'iPXE':
elif info.get('architecture', None) == 'uefi-aarch64' and packet.get(77, None) == b'iPXE':
if not profile:
profile = get_deployment_profile(node, cfg)
profile, stgprofile = get_deployment_profile(node, cfg)
if not profile:
log.log({'info': 'No pending profile for {0}, skipping proxyDHCP eply'.format(node)})
return
@@ -786,6 +816,7 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile):
myipn = socket.inet_aton(myipn)
orepview[12:16] = myipn
repview[20:24] = myipn
repview[24:28] = relayip
repview[236:240] = b'\x63\x82\x53\x63'
repview[240:242] = b'\x35\x01'
if rqtype == 1: # if discover, then offer
@@ -796,17 +827,19 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile):
repview[245:249] = myipn
repview[249:255] = b'\x33\x04\x00\x00\x00\xf0' # fixed short lease time
repview[255:257] = b'\x61\x11'
repview[257:274] = packet[97]
if packet.get(97, None) is not None:
repview[257:274] = packet[97]
# Note that sending PXEClient kicks off the proxyDHCP procedure, ignoring
# boot filename and such in the DHCP packet
# we will simply always do it to provide the boot payload in a consistent
# matter to both dhcp-elsewhere and fixed ip clients
if info['architecture'] == 'uefi-httpboot':
repview[replen - 1:replen + 11] = b'\x3c\x0aHTTPClient'
replen += 12
else:
repview[replen - 1:replen + 10] = b'\x3c\x09PXEClient'
replen += 11
if isboot:
if info.get('architecture', None) == 'uefi-httpboot':
repview[replen - 1:replen + 11] = b'\x3c\x0aHTTPClient'
replen += 12
else:
repview[replen - 1:replen + 10] = b'\x3c\x09PXEClient'
replen += 11
hwlen = bytearray(reqview[2:3].tobytes())[0]
fulladdr = repview[28:28+hwlen].tobytes()
myipbypeer[fulladdr] = myipn
@@ -823,13 +856,14 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile):
repview[replen - 1:replen + 1] = b'\x03\x04'
repview[replen + 1:replen + 5] = gateway
replen += 6
elif relayip != b'\x00\x00\x00\x00' and clipn:
log.log({'error': 'Relay DHCP offer to {} will fail due to missing gateway information'.format(node)})
if 82 in packet:
reloptionslen = len(packet[82])
reloptionshdr = struct.pack('BB', 82, reloptionslen)
repview[replen - 1:replen + 1] = reloptionshdr
repview[replen + 1:replen + reloptionslen + 1] = packet[82]
replen += 2 + reloptionslen
repview[replen - 1:replen] = b'\xff' # end of options, should always be last byte
repview = memoryview(reply)
pktlen = struct.pack('!H', replen + 28) # ip+udp = 28
@@ -853,9 +887,19 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile):
ipinfo = 'with static address {0}'.format(niccfg['ipv4_address'])
else:
ipinfo = 'without address, served from {0}'.format(myip)
log.log({
'info': 'Offering {0} boot {1} to {2}'.format(boottype, ipinfo, node)})
send_raw_packet(repview, replen + 28, reqview, info)
if relayipa:
ipinfo += ' (relayed to {} via {})'.format(relayipa, requestor[0])
if isboot:
log.log({
'info': 'Offering {0} boot {1} to {2}'.format(boottype, ipinfo, node)})
else:
log.log({
'info': 'Offering DHCP {} to {}'.format(ipinfo, node)})
if relayip != b'\x00\x00\x00\x00':
sock.sendto(repview[28:28 + replen], requestor)
else:
send_raw_packet(repview, replen + 28, reqview, info)
def send_raw_packet(repview, replen, reqview, info):
ifidx = info['netinfo']['ifidx']
@@ -880,9 +924,10 @@ def send_raw_packet(repview, replen, reqview, info):
sendto(tsock.fileno(), pkt, replen, 0, ctypes.byref(targ),
ctypes.sizeof(targ))
def ack_request(pkt, rq, info):
def ack_request(pkt, rq, info, sock=None, requestor=None):
hwlen = bytearray(rq[2:3].tobytes())[0]
hwaddr = rq[28:28+hwlen].tobytes()
relayip = rq[24:28].tobytes()
myipn = myipbypeer.get(hwaddr, None)
if not myipn or pkt.get(54, None) != myipn:
return
@@ -901,15 +946,20 @@ def ack_request(pkt, rq, info):
repview[12:len(rply)].tobytes())
datasum = ~datasum & 0xffff
repview[26:28] = struct.pack('!H', datasum)
send_raw_packet(repview, len(rply), rq, info)
if relayip != b'\x00\x00\x00\x00':
sock.sendto(repview[28:], requestor)
else:
send_raw_packet(repview, len(rply), rq, info)
def consider_discover(info, packet, sock, cfg, reqview, nodeguess, addr=None):
if info.get('hwaddr', None) in macmap and info.get('uuid', None):
check_reply(macmap[info['hwaddr']], info, packet, sock, cfg, reqview, addr)
def consider_discover(info, packet, sock, cfg, reqview, nodeguess, addr=None, requestor=None):
if packet.get(53, None) == b'\x03':
ack_request(packet, reqview, info, sock, requestor)
elif info.get('hwaddr', None) in macmap: # and info.get('uuid', None):
check_reply(macmap[info['hwaddr']], info, packet, sock, cfg, reqview, addr, requestor)
elif info.get('uuid', None) in uuidmap:
check_reply(uuidmap[info['uuid']], info, packet, sock, cfg, reqview, addr)
check_reply(uuidmap[info['uuid']], info, packet, sock, cfg, reqview, addr, requestor)
elif packet.get(53, None) == b'\x03':
ack_request(packet, reqview, info)
ack_request(packet, reqview, info, sock, requestor)
elif info.get('uuid', None) and info.get('hwaddr', None):
if time.time() > ignoremacs.get(info['hwaddr'], 0) + 90:
ignoremacs[info['hwaddr']] = time.time()

View File

@@ -471,10 +471,13 @@ def snoop(handler, protocol=None):
# socket in use can occur when aliased ipv4 are encountered
net.bind(('', 427))
net4.bind(('', 427))
newmacs = set([])
known_peers = set([])
peerbymacaddress = {}
deferpeers = []
while True:
try:
newmacs = set([])
newmacs.clear()
r, _, _ = select.select((net, net4), (), (), 60)
# clear known_peers and peerbymacaddress
# to avoid stale info getting in...
@@ -482,14 +485,16 @@ def snoop(handler, protocol=None):
# addresses that come close together
# calling code needs to understand deeper context, as snoop
# will now yield dupe info over time
known_peers = set([])
peerbymacaddress = {}
deferpeers = []
known_peers.clear()
peerbymacaddress.clear()
deferpeers.clear()
while r and len(deferpeers) < 256:
for s in r:
(rsp, peer) = s.recvfrom(9000)
if peer in known_peers:
continue
if peer in deferpeers:
continue
mac = neighutil.get_hwaddr(peer[0])
if not mac:
probepeer = (peer[0], struct.unpack('H', os.urandom(2))[0] | 1025) + peer[2:]

View File

@@ -60,6 +60,7 @@ def active_scan(handler, protocol=None):
known_peers = set([])
for scanned in scan(['urn:dmtf-org:service:redfish-rest:1', 'urn::service:affluent']):
for addr in scanned['addresses']:
addr = addr[0:1] + addr[2:]
if addr in known_peers:
break
hwaddr = neighutil.get_hwaddr(addr[0])
@@ -79,13 +80,20 @@ def scan(services, target=None):
def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byehandler, machandlers, handler):
if mac in peerbymacaddress and peer not in peerbymacaddress[mac]['addresses']:
peerbymacaddress[mac]['addresses'].append(peer)
if mac in peerbymacaddress:
normpeer = peer[0:1] + peer[2:]
for currpeer in peerbymacaddress[mac]['addresses']:
currnormpeer = currpeer[0:1] + peer[2:]
if currnormpeer == normpeer:
break
else:
peerbymacaddress[mac]['addresses'].append(peer)
else:
peerdata = {
'hwaddr': mac,
'addresses': [peer],
}
targurl = None
for headline in rsp[1:]:
if not headline:
continue
@@ -105,13 +113,21 @@ def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byeha
if not value.endswith('/redfish/v1/'):
return
elif header == 'LOCATION':
if not value.endswith('/DeviceDescription.json'):
if '/eth' in value and value.endswith('.xml'):
targurl = '/redfish/v1/'
targtype = 'megarac-bmc'
continue # MegaRAC redfish
elif value.endswith('/DeviceDescription.json'):
targurl = '/DeviceDescription.json'
targtype = 'lenovo-xcc'
continue
else:
return
if handler:
eventlet.spawn_n(check_fish_handler, handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer)
if handler and targurl:
eventlet.spawn_n(check_fish_handler, handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer, targurl, targtype)
def check_fish_handler(handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer):
retdata = check_fish(('/DeviceDescription.json', peerdata))
def check_fish_handler(handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer, targurl, targtype):
retdata = check_fish((targurl, peerdata, targtype))
if retdata:
known_peers.add(peer)
newmacs.add(mac)
@@ -164,11 +180,14 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
net4.bind(('', 1900))
net6.bind(('', 1900))
peerbymacaddress = {}
newmacs = set([])
deferrednotifies = []
machandlers = {}
while True:
try:
newmacs = set([])
deferrednotifies = []
machandlers = {}
newmacs.clear()
deferrednotifies.clear()
machandlers.clear()
r = select.select((net4, net6), (), (), 60)
if r:
r = r[0]
@@ -251,7 +270,10 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
break
candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None)
if candmgrs:
candmgrs = noderange.NodeRange(candmgrs, cfg).nodes
try:
candmgrs = noderange.NodeRange(candmgrs, cfg).nodes
except Exception:
candmgrs = noderange.NodeRange(candmgrs).nodes
if collective.get_myname() not in candmgrs:
break
currtime = time.time()
@@ -322,7 +344,7 @@ def _find_service(service, target):
host = '[{0}]'.format(host)
msg = smsg.format(host, service)
if not isinstance(msg, bytes):
msg = msg.encode('utf8')
msg = msg.encode('utf8')
net6.sendto(msg, addr[4])
else:
net4.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
@@ -410,7 +432,11 @@ def _find_service(service, target):
if '/redfish/v1/' not in peerdata[nid].get('urls', ()) and '/redfish/v1' not in peerdata[nid].get('urls', ()):
continue
if '/DeviceDescription.json' in peerdata[nid]['urls']:
pooltargs.append(('/DeviceDescription.json', peerdata[nid]))
pooltargs.append(('/DeviceDescription.json', peerdata[nid], 'lenovo-xcc'))
else:
for targurl in peerdata[nid]['urls']:
if '/eth' in targurl and targurl.endswith('.xml'):
pooltargs.append(('/redfish/v1/', peerdata[nid], 'megarac-bmc'))
# For now, don't interrogate generic redfish bmcs
# This is due to a need to deduplicate from some supported SLP
# targets (IMM, TSM, others)
@@ -425,21 +451,32 @@ def _find_service(service, target):
def check_fish(urldata, port=443, verifycallback=None):
if not verifycallback:
verifycallback = lambda x: True
url, data = urldata
try:
url, data, targtype = urldata
except ValueError:
url, data = urldata
targtype = 'service:redfish-bmc'
try:
wc = webclient.SecureHTTPConnection(_get_svrip(data), port, verifycallback=verifycallback, timeout=1.5)
peerinfo = wc.grab_json_response(url)
peerinfo = wc.grab_json_response(url, headers={'Accept': 'application/json'})
except socket.error:
return None
if url == '/DeviceDescription.json':
if not peerinfo:
return None
try:
peerinfo = peerinfo[0]
except KeyError:
peerinfo['xcc-variant'] = '3'
except IndexError:
return None
try:
myuuid = peerinfo['node-uuid'].lower()
if '-' not in myuuid:
myuuid = '-'.join([myuuid[:8], myuuid[8:12], myuuid[12:16], myuuid[16:20], myuuid[20:]])
data['uuid'] = myuuid
data['attributes'] = peerinfo
data['services'] = ['lenovo-xcc']
data['services'] = ['lenovo-xcc'] if 'xcc-variant' not in peerinfo else ['lenovo-xcc' + peerinfo['xcc-variant']]
return data
except (IndexError, KeyError):
return None
@@ -447,7 +484,7 @@ def check_fish(urldata, port=443, verifycallback=None):
peerinfo = wc.grab_json_response('/redfish/v1/')
if url == '/redfish/v1/':
if 'UUID' in peerinfo:
data['services'] = ['service:redfish-bmc']
data['services'] = [targtype]
data['uuid'] = peerinfo['UUID'].lower()
return data
return None
@@ -466,7 +503,12 @@ def _parse_ssdp(peer, rsp, peerdata):
if code == b'200':
if nid in peerdata:
peerdatum = peerdata[nid]
if peer not in peerdatum['addresses']:
normpeer = peer[0:1] + peer[2:]
for currpeer in peerdatum['addresses']:
currnormpeer = currpeer[0:1] + peer[2:]
if currnormpeer == normpeer:
break
else:
peerdatum['addresses'].append(peer)
else:
peerdatum = {
@@ -501,5 +543,7 @@ def _parse_ssdp(peer, rsp, peerdata):
if __name__ == '__main__':
def printit(rsp):
print(repr(rsp))
pass # print(repr(rsp))
active_scan(printit)

View File

@@ -72,6 +72,20 @@ opmap = {
}
def get_user_for_session(sessionid, sessiontok):
    """Return the username bound to a session after CSRF validation.

    :param sessionid: session identifier (str or bytes)
    :param sessiontok: CSRF token presented by the client (str or bytes)
    :raises Exception: if either value is missing or the token does not
                       match the session's recorded csrftoken
    """
    # Reject missing values before decoding: the original decoded first,
    # so a None sessionid/token crashed with AttributeError instead of
    # raising the intended error message
    if not sessionid or not sessiontok:
        raise Exception("invalid session id or token")
    if not isinstance(sessionid, str):
        sessionid = sessionid.decode()
    if not isinstance(sessiontok, str):
        sessiontok = sessiontok.decode()
    if sessiontok != httpsessions.get(sessionid, {}).get('csrftoken', None):
        raise Exception("Invalid csrf token for session")
    user = httpsessions[sessionid]['name']
    if not isinstance(user, str):
        user = user.decode()
    return user
def group_creation_resources():
yield confluent.messages.Attributes(
kv={'name': None}, desc="Name of the group").html() + '<br>'
@@ -175,6 +189,8 @@ def _get_query_dict(env, reqbody, reqtype):
qstring = None
if qstring:
for qpair in qstring.split('&'):
if '=' not in qpair:
continue
qkey, qvalue = qpair.split('=')
qdict[qkey] = qvalue
if reqbody is not None:
@@ -668,7 +684,11 @@ def resourcehandler_backend(env, start_response):
if 'CONTENT_LENGTH' in env and int(env['CONTENT_LENGTH']) > 0:
reqbody = env['wsgi.input'].read(int(env['CONTENT_LENGTH']))
reqtype = env['CONTENT_TYPE']
operation = opmap[env['REQUEST_METHOD']]
operation = opmap.get(env['REQUEST_METHOD'], None)
if not operation:
start_response('400 Bad Method', headers)
yield ''
return
querydict = _get_query_dict(env, reqbody, reqtype)
if operation != 'retrieve' and 'restexplorerop' in querydict:
operation = querydict['restexplorerop']
@@ -915,6 +935,45 @@ def resourcehandler_backend(env, start_response):
start_response('200 OK', headers)
yield rsp
return
elif (operation == 'create' and ('/staging' in env['PATH_INFO'])):
url = env['PATH_INFO']
args_dict = {}
content_length = int(env.get('CONTENT_LENGTH', 0))
if content_length > 0 and (len(url.split('/')) > 2):
# check if the user and the url defined user are the same
if authorized['username'] == url.split('/')[2]:
args_dict.update({'filedata':env, 'content_length': content_length})
hdlr = pluginapi.handle_path(url, operation, cfgmgr, args_dict)
for resp in hdlr:
if isinstance(resp, confluent.messages.FileUploadProgress):
if resp.kvpairs['progress']['value'] == 100:
progress = resp.kvpairs['progress']['value']
start_response('200 OK', headers)
yield json.dumps({'data': 'done'})
return
else:
start_response('401 Unauthorized', headers)
yield json.dumps({'data': 'You do not have permission to write to file'})
return
elif 'application/json' in reqtype and (len(url.split('/')) == 2):
if not isinstance(reqbody, str):
reqbody = reqbody.decode('utf8')
pbody = json.loads(reqbody)
args = pbody['args']
args_dict.update({'filename': args, 'user': authorized['username']})
try:
args_dict.update({'bank': pbody['bank']})
except KeyError:
pass
hdlr = pluginapi.handle_path(url, operation, cfgmgr, args_dict)
for res in hdlr:
if isinstance(res, confluent.messages.CreatedResource):
stageurl = res.kvpairs['created']
start_response('200 OK', headers)
yield json.dumps({'data': stageurl})
return
else:
# normal request
url = env['PATH_INFO']

View File

@@ -220,16 +220,20 @@ def setlimits():
def assure_ownership(path):
try:
if os.getuid() != os.stat(path).st_uid:
sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path))
if os.getuid() == 0:
sys.stderr.write('Attempting to run as root, when non-root usage is detected\n')
else:
sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path))
sys.exit(1)
except OSError as e:
if e.errno == 13:
sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path))
if os.getuid() == 0:
sys.stderr.write('Attempting to run as root, when non-root usage is detected\n')
else:
sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path))
sys.exit(1)
def sanity_check():
if os.getuid() == 0:
return True
assure_ownership('/etc/confluent')
assure_ownership('/etc/confluent/cfg')
for filename in glob.glob('/etc/confluent/cfg/*'):

View File

@@ -262,10 +262,10 @@ class Generic(ConfluentMessage):
def json(self):
return json.dumps(self.data)
def raw(self):
return self.data
def html(self):
return json.dumps(self.data)
@@ -344,10 +344,10 @@ class ConfluentResourceCount(ConfluentMessage):
self.myargs = [count]
self.desc = 'Resource Count'
self.kvpairs = {'count': count}
def strip_node(self, node):
pass
class CreatedResource(ConfluentMessage):
notnode = True
readonly = True
@@ -569,6 +569,8 @@ def get_input_message(path, operation, inputdata, nodes=None, multinode=False,
return InputLicense(path, nodes, inputdata, configmanager)
elif path == ['deployment', 'ident_image']:
return InputIdentImage(path, nodes, inputdata)
elif path == ['console', 'ikvm']:
return InputIkvmParams(path, nodes, inputdata)
elif inputdata:
raise exc.InvalidArgumentException(
'No known input handler for request')
@@ -638,6 +640,18 @@ class SavedFile(ConfluentMessage):
self.myargs = (node, file)
self.kvpairs = {node: {'filename': file}}
class FileUploadProgress(ConfluentMessage):
    """Message reporting percent-complete progress of a file upload.

    When *name* is None the progress is reported globally (notnode);
    otherwise it is keyed by the node name.
    """
    readonly = True

    def __init__(self, progress, name=None):
        # myargs must be an iterable of constructor args, matching sibling
        # messages (e.g. SavedFile's (node, file)); '(progress)' was a bare
        # value, not a one-element tuple
        self.myargs = (progress,)
        self.stripped = False
        self.notnode = name is None
        if self.notnode:
            self.kvpairs = {'progress': {'value': progress}}
        else:
            self.kvpairs = {name: {'progress': {'value': progress}}}
class InputAlertData(ConfluentMessage):
def __init__(self, path, inputdata, nodes=None):
@@ -936,6 +950,9 @@ class InputIdentImage(ConfluentInputMessage):
keyname = 'ident_image'
valid_values = ['create']
class InputIkvmParams(ConfluentInputMessage):
    # Input validator for ikvm console requests: accepts the connection
    # 'method', either a local unix socket or a secure websocket (wss)
    keyname = 'method'
    valid_values = ['unix', 'wss']
class InputIdentifyMessage(ConfluentInputMessage):
valid_values = set([

View File

@@ -193,6 +193,9 @@ class NetManager(object):
iname = attribs.get('interface_names', None)
if iname:
myattribs['interface_names'] = iname
vlanid = attribs.get('vlan_id', None)
if vlanid:
myattribs['vlan_id'] = vlanid
teammod = attribs.get('team_mode', None)
if teammod:
myattribs['team_mode'] = teammod
@@ -320,7 +323,7 @@ def get_full_net_config(configmanager, node, serverip=None):
if val is None:
continue
if attrib.startswith('net.'):
attrib = attrib.replace('net.', '').rsplit('.', 1)
attrib = attrib.replace('net.', '', 1).rsplit('.', 1)
if len(attrib) == 1:
iface = None
attrib = attrib[0]
@@ -405,7 +408,8 @@ def noneify(cfgdata):
# the ip as reported by recvmsg to match the subnet of that net.* interface
# if switch and port available, that should match.
def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None,
serverip=None):
serverip=None, relayipn=b'\x00\x00\x00\x00',
clientip=None):
"""Fetch network configuration parameters for a nic
For a given node and interface, find and retrieve the pertinent network
@@ -426,6 +430,28 @@ def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None,
#TODO(jjohnson2): ip address, prefix length, mac address,
# join a bond/bridge, vlan configs, etc.
# also other nic criteria, physical location, driver and index...
clientfam = None
clientipn = None
serverfam = None
serveripn = None
llaipn = socket.inet_pton(socket.AF_INET6, 'fe80::')
if serverip is not None:
if '.' in serverip:
serverfam = socket.AF_INET
elif ':' in serverip:
serverfam = socket.AF_INET6
if serverfam:
serveripn = socket.inet_pton(serverfam, serverip)
if clientip is not None:
if '%' in clientip:
            # link local, don't even bother
clientfam = None
elif '.' in clientip:
clientfam = socket.AF_INET
elif ':' in clientip:
clientfam = socket.AF_INET6
if clientfam:
clientipn = socket.inet_pton(clientfam, clientip)
nodenetattribs = configmanager.get_node_attributes(
node, 'net*').get(node, {})
cfgbyname = {}
@@ -463,9 +489,22 @@ def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None,
cfgdata['ipv4_broken'] = True
if v6broken:
cfgdata['ipv6_broken'] = True
isremote = False
if serverip is not None:
dhcprequested = False
myaddrs = get_addresses_by_serverip(serverip)
if serverfam == socket.AF_INET6 and ipn_on_same_subnet(serverfam, serveripn, llaipn, 64):
isremote = False
elif clientfam:
for myaddr in myaddrs:
# we may have received over a local vlan, wrong aliased subnet
# so have to check for *any* potential matches
fam, svrip, prefix = myaddr[:3]
if fam == clientfam:
if ipn_on_same_subnet(fam, clientipn, svrip, prefix):
break
else:
isremote = True
genericmethod = 'static'
ipbynodename = None
ip6bynodename = None
@@ -486,6 +525,10 @@ def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None,
bestsrvbyfam = {}
for myaddr in myaddrs:
fam, svrip, prefix = myaddr[:3]
if fam == socket.AF_INET and relayipn != b'\x00\x00\x00\x00':
bootsvrip = relayipn
else:
bootsvrip = svrip
candsrvs.append((fam, svrip, prefix))
if fam == socket.AF_INET:
nver = '4'
@@ -505,14 +548,17 @@ def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None,
candip = cfgbyname[candidate].get('ipv{}_address'.format(nver), None)
if candip and '/' in candip:
candip, candprefix = candip.split('/')
if int(candprefix) != prefix:
if fam == socket.AF_INET and relayipn != b'\x00\x00\x00\x00':
prefix = int(candprefix)
if (not isremote) and int(candprefix) != prefix:
continue
candgw = cfgbyname[candidate].get('ipv{}_gateway'.format(nver), None)
if candip:
try:
for inf in socket.getaddrinfo(candip, 0, fam, socket.SOCK_STREAM):
candipn = socket.inet_pton(fam, inf[-1][0])
if ipn_on_same_subnet(fam, svrip, candipn, prefix):
if ((isremote and ipn_on_same_subnet(fam, clientipn, candipn, int(candprefix)))
or ipn_on_same_subnet(fam, bootsvrip, candipn, prefix)):
bestsrvbyfam[fam] = svrip
cfgdata['ipv{}_address'.format(nver)] = candip
cfgdata['ipv{}_method'.format(nver)] = ipmethod
@@ -530,7 +576,7 @@ def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None,
elif candgw:
for inf in socket.getaddrinfo(candgw, 0, fam, socket.SOCK_STREAM):
candgwn = socket.inet_pton(fam, inf[-1][0])
if ipn_on_same_subnet(fam, svrip, candgwn, prefix):
if ipn_on_same_subnet(fam, bootsvrip, candgwn, prefix):
candgws.append((fam, candgwn, prefix))
if foundaddr:
return noneify(cfgdata)

View File

@@ -155,19 +155,38 @@ def update_boot_esxi(profiledir, profile, label):
def find_glob(loc, fileglob):
grubcfgs = []
for cdir, _, fs in os.walk(loc):
for f in fs:
if fnmatch(f, fileglob):
return os.path.join(cdir, f)
return None
grubcfgs.append(os.path.join(cdir, f))
return grubcfgs
def update_boot_linux(profiledir, profile, label):
profname = os.path.basename(profiledir)
kernelargs = profile.get('kernelargs', '')
needefi = False
for grubexe in glob.glob(profiledir + '/boot/efi/boot/grubx64.efi'):
with open(grubexe, 'rb') as grubin:
grubcontent = grubin.read()
uaidx = grubcontent.find(b'User-Agent: GRUB 2.0')
if uaidx > 0:
grubcontent = grubcontent[uaidx:]
cridx = grubcontent.find(b'\r')
if cridx > 1:
grubcontent = grubcontent[:cridx]
grubver = grubcontent.split(b'~', 1)[0]
grubver = grubver.rsplit(b' ', 1)[-1]
grubver = grubver.split(b'.')
if len(grubver) > 1:
if int(grubver[0]) < 3 and int(grubver[1]) < 3:
needefi = True
lincmd = 'linuxefi' if needefi else 'linux'
initrdcmd = 'initrdefi' if needefi else 'initrd'
grubcfg = "set timeout=5\nmenuentry '"
grubcfg += label
grubcfg += "' {\n linuxefi /kernel " + kernelargs + "\n"
grubcfg += "' {\n " + lincmd + " /kernel " + kernelargs + "\n"
initrds = []
for initramfs in glob.glob(profiledir + '/boot/initramfs/*.cpio'):
initramfs = os.path.basename(initramfs)
@@ -175,16 +194,21 @@ def update_boot_linux(profiledir, profile, label):
for initramfs in os.listdir(profiledir + '/boot/initramfs'):
if initramfs not in initrds:
initrds.append(initramfs)
grubcfg += " initrdefi "
grubcfg += " " + initrdcmd + " "
for initramfs in initrds:
grubcfg += " /initramfs/{0}".format(initramfs)
grubcfg += "\n}\n"
    # we'll need to honor the grub prefix path if it differs
grubcfgpath = find_glob(profiledir + '/boot', 'grub.cfg')
if not grubcfgpath:
grubcfgpath = profiledir + '/boot/efi/boot/grub.cfg'
with open(grubcfgpath, 'w') as grubout:
grubout.write(grubcfg)
grubcfgpath = [
profiledir + '/boot/efi/boot/grub.cfg',
profiledir + '/boot/boot/grub/grub.cfg'
]
for grubcfgpth in grubcfgpath:
os.makedirs(os.path.dirname(grubcfgpth), 0o755, exist_ok=True)
with open(grubcfgpth, 'w') as grubout:
grubout.write(grubcfg)
ipxeargs = kernelargs
for initramfs in initrds:
ipxeargs += " initrd=" + initramfs

View File

@@ -0,0 +1,34 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2024 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This provides linkage between vinz and confluent, with support
# for getting session authorization from the BMC
import confluent.vinzmanager as vinzmanager
import confluent.messages as msg
def create(nodes, element, configmanager, inputdata):
    """Yield a child collection entry per node with its URL from the vinz manager."""
    yield from (msg.ChildCollection(vinzmanager.get_url(node, inputdata))
                for node in nodes)
def update(nodes, element, configmanager, inputdata):
    """Yield a child collection entry per node with its URL from the vinz manager."""
    yield from (msg.ChildCollection(vinzmanager.get_url(node, inputdata))
                for node in nodes)

View File

@@ -119,6 +119,7 @@ class TsmConsole(conapi.Console):
self.datacallback = None
self.nodeconfig = config
self.connected = False
self.recvr = None
def recvdata(self):
@@ -134,22 +135,30 @@ class TsmConsole(conapi.Console):
kv = util.TLSCertVerifier(
self.nodeconfig, self.node, 'pubkeys.tls_hardwaremanager').verify_cert
wc = webclient.SecureHTTPConnection(self.origbmc, 443, verifycallback=kv)
rsp = wc.grab_json_response_with_status('/login', {'data': [self.username.decode('utf8'), self.password.decode("utf8")]}, headers={'Content-Type': 'application/json'})
try:
rsp = wc.grab_json_response_with_status('/login', {'data': [self.username.decode('utf8'), self.password.decode("utf8")]}, headers={'Content-Type': 'application/json', 'Accept': 'application/json'})
except Exception as e:
raise cexc.TargetEndpointUnreachable(str(e))
if rsp[1] > 400:
raise cexc.TargetEndpointBadCredentials
bmc = self.bmc
if '%' in self.bmc:
prefix = self.bmc.split('%')[0]
bmc = prefix + ']'
self.ws = WrappedWebSocket(host=bmc)
self.ws.set_verify_callback(kv)
self.ws.connect('wss://{0}/console0'.format(self.bmc), host=bmc, cookie='XSRF-TOKEN={0}; SESSION={1}'.format(wc.cookies['XSRF-TOKEN'], wc.cookies['SESSION']))
self.ws.connect('wss://{0}/console0'.format(self.bmc), host=bmc, cookie='XSRF-TOKEN={0}; SESSION={1}'.format(wc.cookies['XSRF-TOKEN'], wc.cookies['SESSION']), subprotocols=[wc.cookies['XSRF-TOKEN']])
self.connected = True
eventlet.spawn_n(self.recvdata)
self.recvr = eventlet.spawn(self.recvdata)
return
def write(self, data):
self.ws.send(data)
def close(self):
if self.recvr:
self.recvr.kill()
self.recvr = None
if self.ws:
self.ws.close()
self.connected = False

View File

@@ -45,6 +45,19 @@ class WebClient(object):
'target certificate fingerprint and '
'pubkeys.tls_hardwaremanager attribute'))
return {}
except (socket.gaierror, socket.herror, TimeoutError) as e:
results.put(msg.ConfluentTargetTimeout(self.node, str(e)))
return {}
except OSError as e:
if e.errno == 113:
results.put(msg.ConfluentTargetTimeout(self.node))
else:
results.put(msg.ConfluentTargetTimeout(self.node), str(e))
return {}
except Exception as e:
results.put(msg.ConfluentNodeError(self.node,
repr(e)))
return {}
if status == 401:
results.put(msg.ConfluentTargetInvalidCredentials(self.node, 'Unable to authenticate'))
return {}
@@ -115,9 +128,7 @@ def retrieve(nodes, element, configmanager, inputdata):
results = queue.LightQueue()
workers = set([])
if element == ['power', 'state']:
for node in nodes:
yield msg.PowerState(node=node, state='on')
return
_run_method(retrieve_power, workers, results, configmanager, nodes, element)
elif element == ['health', 'hardware']:
_run_method(retrieve_health, workers, results, configmanager, nodes, element)
elif element[:3] == ['inventory', 'hardware', 'all']:
@@ -188,9 +199,15 @@ def retrieve_sensors(configmanager, creds, node, results, element):
def retrieve_power(configmanager, creds, node, results, element):
wc = WebClient(node, configmanager, creds)
hinfo = wc.fetch('/affluent/health', results)
if hinfo:
results.put(msg.PowerState(node=node, state='on'))
def retrieve_health(configmanager, creds, node, results, element):
wc = WebClient(node, configmanager, creds)
hinfo = wc.fetch('/affluent/health', results)
hinfo = wc.fetch('/affluent/health', results)
if hinfo:
results.put(msg.HealthSummary(hinfo.get('health', 'unknown'), name=node))
results.put(msg.SensorReadings(hinfo.get('sensors', []), name=node))

View File

@@ -0,0 +1,347 @@
# Copyright 2019 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Noncritical:
# - One or more temperature sensors is in the warning range;
#Critical:
# - One or more temperature sensors is in the failure range;
# - One or more fans are running < 100 RPM;
# - One power supply is off.
import re
import eventlet
import eventlet.queue as queue
import confluent.exceptions as exc
webclient = eventlet.import_patched("pyghmi.util.webclient")
import confluent.messages as msg
import confluent.util as util
import confluent.plugins.shell.ssh as ssh
class SwitchSensor(object):
    """Value object describing a single sensor reading from a switch.

    Attributes: name, value, states, health and units; any of them may be
    left as None when the source output did not provide the field.
    """

    def __init__(self, name, states=None, units=None, value=None, health=None):
        self.name = name
        self.states = states
        self.health = health
        self.units = units
        self.value = value
def _run_method(method, workers, results, configmanager, nodes, element):
    """Spawn one greenthread per node running *method*, tracking it in *workers*.

    Credentials for every node are fetched once up front and shared with
    each spawned worker.
    """
    credattrs = ["switchuser", "switchpass",
                 "secret.hardwaremanagementpassword",
                 "secret.hardwaremanagementuser"]
    creds = configmanager.get_node_attributes(nodes, credattrs, decrypt=True)
    for target in nodes:
        workers.add(
            eventlet.spawn(method, configmanager, creds, target, results,
                           element))
def enos_login(node, configmanager, creds):
    """Open an authenticated SSH connection to an ENOS switch.

    Prefers the switch-specific switchuser/switchpass attributes; falls back
    to secret.hardwaremanagementuser/secret.hardwaremanagementpassword.
    Raises exc.TargetEndpointBadCredentials on any failure (the broad
    except below deliberately wraps every error, matching original behavior).
    """
    try:
        ukey = "switchuser"
        upass = "switchpass"
        # BUGFIX: creds is keyed by node name, so the original test
        # `ukey not in creds` was always true and the switch-specific
        # user was never honored; check the per-node attribute dict.
        if ukey not in creds[node] and "secret.hardwaremanagementuser" in creds[node]:
            ukey = "secret.hardwaremanagementuser"
            upass = "secret.hardwaremanagementpassword"
        if ukey not in creds[node]:
            raise exc.TargetEndpointBadCredentials("Unable to authenticate - switchuser or secret.hardwaremanagementuser not set")
        user = creds[node][ukey]["value"]
        # Password is optional; SshConn falls back to key-based auth
        # (presumably) when it is None -- TODO confirm against SshConn.
        if upass not in creds[node]:
            passwd = None
        else:
            passwd = creds[node][upass]["value"]
        nssh = ssh.SshConn(node=node, config=configmanager, username=user, password=passwd)
        nssh.do_logon()
        return nssh
    except Exception as e:
        raise exc.TargetEndpointBadCredentials(f"Unable to authenticate {e}")
def enos_version(ssh):
    """Run `show version` on the switch and return its stdout payload."""
    stdout, _stderr = ssh.exec_command(cmd="show", cmdargs=["version"])
    return stdout
def update(nodes, element, configmanager, inputdata):
    """Write operations are not supported; report an error for each node."""
    yield from (msg.ConfluentNodeError(node, "Not Implemented")
                for node in nodes)
def delete(nodes, element, configmanager, inputdata):
    """Delete operations are not supported; report an error for each node."""
    yield from (msg.ConfluentNodeError(node, "Not Implemented")
                for node in nodes)
def create(nodes, element, configmanager, inputdata):
    """Create operations are not supported; report an error for each node."""
    yield from (msg.ConfluentNodeError(node, "Not Implemented")
                for node in nodes)
def retrieve(nodes, element, configmanager, inputdata):
    """Dispatch a read request for the supported switch resource paths.

    Power state is answered synchronously (a reachable switch is reported
    as 'on'); other supported paths fan out to one worker greenthread per
    node and stream their results back as they arrive.
    """
    results = queue.LightQueue()
    workers = set([])
    if element == ["power", "state"]:
        for node in nodes:
            yield msg.PowerState(node=node, state="on")
        return
    elif element == ["health", "hardware"]:
        _run_method(retrieve_health, workers, results, configmanager, nodes, element)
    elif element[:3] == ["inventory", "hardware", "all"]:
        _run_method(retrieve_inventory, workers, results, configmanager, nodes, element)
    elif element[:3] == ["inventory", "firmware", "all"]:
        _run_method(retrieve_firmware, workers, results, configmanager, nodes, element)
    elif element[:3] == ["sensors", "hardware", "all"]:
        _run_method(retrieve_sensors, workers, results, configmanager, nodes, element)
    else:
        for node in nodes:
            yield msg.ConfluentNodeError(node, f"Not Implemented: {element}")
        return
    # Drain worker results until every greenthread has finished.
    # (Removed an unused `currtimeout` local from the original.)
    while workers:
        try:
            # BUGFIX: the original called results.get(10), which binds
            # eventlet's `block` parameter rather than `timeout`, so the
            # loop could block forever if a worker died without putting a
            # result; pass the timeout explicitly.
            datum = results.get(timeout=10)
            while datum:
                if datum:
                    yield datum
                datum = results.get_nowait()
        except queue.Empty:
            pass
        eventlet.sleep(0.001)
        for t in list(workers):
            if t.dead:
                workers.discard(t)
    # Final sweep for anything queued after the last worker exited.
    try:
        while True:
            datum = results.get_nowait()
            if datum:
                yield datum
    except queue.Empty:
        pass
def retrieve_inventory(configmanager, creds, node, results, element):
    """Report hardware inventory for one switch.

    A bare three-element path only enumerates the child collections;
    otherwise the parsed inventory is augmented with fan and PSU states
    and queued as a key/value message.
    """
    if len(element) == 3:
        for child in ("all", "system"):
            results.put(msg.ChildCollection(child))
        return
    switch = gather_data(configmanager, creds, node)
    invinfo = switch["inventory"]
    information = invinfo["inventory"][0]["information"]
    for fan, data in switch["fans"].items():
        information[f"Fan #{fan}"] = data["state"]
    for psu, data in switch["psus"].items():
        information[f"PSU #{psu}"] = data["state"]
    results.put(msg.KeyValueData(invinfo, node))
def gather_data(configmanager, creds, node):
    """Log in to the switch and parse `show version` output into a dict.

    Returns a dict with keys: 'sensors' (list of SwitchSensor), 'fans',
    'psus', 'inventory' (message-ready structure) and 'firmware'.

    Fixes over the original: all regex patterns are raw strings (the
    originals emitted invalid-escape-sequence warnings), and temperature
    limit comparisons are guarded so missing limits in the output no
    longer raise TypeError when compared against an int.
    """
    nssh = enos_login(node=node, configmanager=configmanager, creds=creds)
    switch_lines = enos_version(ssh=nssh)
    switch_data = {}
    sysinfo = {"Product name": {"regex": r".*RackSwitch (\w+)"},
               "Serial Number": {"regex": r"ESN\s*\w*\s*: ([\w-]+)"},
               "Board Serial Number": {"regex": r"Switch Serial No: (\w+)"},
               "Model": {"regex": r"MTM\s*\w*\s*: ([\w-]+)"},
               "FRU Number": {"regex": r"Hardware Part\s*\w*\s*: (\w+)"},
               "Airflow": {"regex": r"System Fan Airflow\s*\w*\s*: ([\w-]+)"},
               }
    invinfo = {
        "inventory": [{
            "name": "System",
            "present": True,
            "information": {
                "Manufacturer": "Lenovo",
            }
        }]
    }
    switch_data["sensors"] = []
    switch_data["fans"] = gather_fans(switch_lines)
    for fan, data in switch_data["fans"].items():
        if "rpm" in data:
            # A fan spinning below 100 RPM is treated as failed.
            health = "critical" if int(data["rpm"]) < 100 else "ok"
            switch_data["sensors"].append(SwitchSensor(
                name=f"Fan {fan}", value=data['rpm'], units="RPM",
                health=health))
    switch_data["psus"] = gather_psus(switch_lines)
    # Hunt for the temperature warning/shutdown limits.
    phylimit = {"warn": None, "shut": None}
    templimit = {"warn": None, "shut": None}
    for line in switch_lines:
        match = re.match(r"([\w\s]+)Warning[\w\s]+\s(\d+)[\sA-Za-z\/]+\s(\d+)[\s\w\/]+\s(\d*)", line)
        if match:
            if "System" in match.group(1):
                templimit["warn"] = int(match.group(2))
                templimit["shut"] = int(match.group(3))
            elif "PHYs" in match.group(1):
                phylimit["warn"] = int(match.group(2))
                phylimit["shut"] = int(match.group(3))
    if not phylimit["warn"]:
        # No PHY-specific limits found; fall back to the system limits.
        phylimit = templimit
    for line in switch_lines:
        # Match the inventory data.
        for key in sysinfo:
            match = re.match(re.compile(sysinfo[key]["regex"]), line)
            if match:
                invinfo["inventory"][0]["information"][key] = match.group(1).strip()
        # Match temperature sensors and grade them against the limits.
        match = re.match(r"Temperature\s+([\d\s\w]+)\s*:\s*(\d+)+\s+([CF])+", line)
        if match:
            health = "ok"
            temp = int(match.group(2))
            name = f"{match.group(1).strip()} Temp"
            limits = phylimit if "Phy" in name else templimit
            # Guard against limits never found in the output (original
            # would raise TypeError comparing int to None).
            if limits["warn"] is not None and temp > limits["warn"]:
                health = "warning"
            if limits["shut"] is not None and temp > limits["shut"]:
                health = "critical"
            switch_data["sensors"].append(SwitchSensor(
                name=name, value=temp, units=f"°{match.group(3)}",
                health=health))
        match = re.match(r"\s*(\w+) Faults\s*:\s+(.+)", line)
        if match and match.group(2) not in ["()", "None"]:
            switch_data["sensors"].append(SwitchSensor(
                name=f"{match.group(1)} Fault",
                value=match.group(2).strip(), units="", health="critical"))
    switch_data["inventory"] = invinfo
    sysfw = {"Software Version": "Unknown", "Boot kernel": "Unknown"}
    for line in switch_lines:
        for key in sysfw:
            match = re.match(rf"{key}\s*\w*\s* ([0-9.]+)", line)
            if match:
                sysfw[key] = match.group(1)
    switch_data["firmware"] = sysfw
    return switch_data
def gather_psus(data):
    """Parse power-supply presence/airflow from `show version` output lines.

    Returns a dict keyed by PSU number -- a string for the numbered
    format, an int for the unnumbered fallback -- each mapping to a dict
    with 'state' and, when parseable, 'airflow'.

    Fixes over the original: raw-string regex (no invalid-escape
    warnings, no pointless f-string) and `(\\d+)` so multi-digit PSU
    numbers are captured whole instead of only their last digit.
    """
    psus = {}
    for line in data:
        # Two observed formats:
        #   Power Supply 1: Back-To-Front
        #   Internal Power Supply: On
        if "Power Supply" not in line:
            continue
        match = re.match(r"Power Supply (\d+).*", line)
        if match:
            psu = match.group(1)
            entry = psus.setdefault(psu, {})
            # An airflow token (e.g. Back-To-Front) implies the PSU is fitted.
            airflow = re.match(r".+\s+(\w+\-\w+\-\w+)\s*\[*.*$", line)
            if airflow:
                entry["airflow"] = airflow.group(1)
                entry["state"] = "Present"
            else:
                entry["state"] = "Not installed"
        else:
            # Unnumbered PSU line: record it under the first free slot.
            for psu in range(1, 10):
                if psu not in psus:
                    if "Not Installed" in line:
                        psus[psu] = {"state": "Not installed"}
                    else:
                        psus[psu] = {"state": "Present"}
                    break
    return psus
def gather_fans(data):
    """Parse fan status from `show version` output lines.

    Returns a dict keyed by fan number (string), each mapping to a dict
    with 'state' and, when present in the line, 'Module', 'rpm', 'pwm'
    and 'airflow'.

    Fixes over the original: airflow now comes from group(2) of the
    trailing-token regex (the original stored group(1), i.e. the line
    prefix, by mistake); regexes are raw strings with `(\\d+)` captures;
    redundant double `if match:` removed.
    """
    fans = {}
    for line in data:
        if "Fan" not in line:
            continue
        match = re.match(r"Fan (\d+).*", line)
        if not match:
            continue
        fan = match.group(1)
        fans.setdefault(fan, {})
        if "rpm" in line or "RPM" in line:
            if "Module" in line:
                m = re.search(r"Module\s+(\d+):", line)
                if m:
                    fans[fan]["Module"] = m.group(1)
            fans[fan]["state"] = "Present"
            m = re.search(r"(\d+)\s*:\s+(RPM=)*(\d+)(rpm)*", line)
            if m:
                fans[fan]["rpm"] = m.group(3)
            m = re.search(r"\s+(PWM=)*(\d+)(%|pwm)+", line)
            if m:
                fans[fan]["pwm"] = m.group(2)
            m = re.search(r"(.+)\s+(\w+\-\w+\-\w+)$", line)
            if m:
                # group(2) is the trailing airflow token, e.g. Front-To-Back.
                fans[fan]["airflow"] = m.group(2)
        else:
            fans[fan]["state"] = "Not installed"
    return fans
def retrieve_firmware(configmanager, creds, node, results, element):
    """Report firmware versions for one switch.

    A bare three-element path only enumerates the child collection;
    otherwise the parsed firmware versions are queued as a Firmware message.
    """
    if len(element) == 3:
        results.put(msg.ChildCollection("all"))
        return
    fwinfo = gather_data(configmanager, creds, node)["firmware"]
    components = [
        {"Software": {"version": fwinfo["Software Version"]}},
        {"Boot kernel": {"version": fwinfo["Boot kernel"]}},
    ]
    results.put(msg.Firmware(components, node))
def retrieve_health(configmanager, creds, node, results, element):
    """Summarize switch health from its sensor readings.

    Summary escalates ok -> warning -> critical based on the worst
    sensor; every non-ok sensor is reported as a bad reading.
    """
    switch = gather_data(configmanager, creds, node)
    summary = "ok"
    badreadings = []
    # BUGFIX: reuse the data already gathered above; the original called
    # gather_data a second time (a second SSH login) just to re-read the
    # same sensors.
    for sensor in switch["sensors"]:
        if sensor.health not in ["ok"]:
            if sensor.health in ["critical"]:
                summary = "critical"
            elif summary in ["ok"] and sensor.health in ["warning"]:
                summary = "warning"
            badreadings.append(sensor)
    results.put(msg.HealthSummary(summary, name=node))
    results.put(msg.SensorReadings(badreadings, name=node))
def retrieve_sensors(configmanager, creds, node, results, element):
    """Queue every sensor reading gathered from the switch."""
    readings = gather_data(configmanager, creds, node)["sensors"]
    results.put(msg.SensorReadings(readings, node))

View File

@@ -1,13 +1,13 @@
# Copyright 2022 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
@@ -20,12 +20,15 @@ import eventlet.green.time as time
import eventlet
import eventlet.greenpool as greenpool
def simplify_name(name):
return name.lower().replace(' ', '_').replace('/', '-').replace(
'_-_', '-')
return name.lower().replace(' ', '_').replace('/', '-').replace('_-_', '-')
pdupool = greenpool.GreenPool(128)
def data_by_type(indata):
databytype = {}
for keyname in indata:
@@ -34,7 +37,9 @@ def data_by_type(indata):
if not objtype:
continue
if objtype in databytype:
raise Exception("Multiple instances of type {} not yet supported".format(objtype))
raise Exception(
'Multiple instances of type {} not yet supported'.format(objtype)
)
databytype[objtype] = obj
obj['keyname'] = keyname
return databytype
@@ -58,31 +63,30 @@ class GeistClient(object):
def wc(self):
if self._wc:
return self._wc
targcfg = self.configmanager.get_node_attributes(self.node,
['hardwaremanagement.manager'],
decrypt=True)
targcfg = self.configmanager.get_node_attributes(
self.node, ['hardwaremanagement.manager'], decrypt=True
)
targcfg = targcfg.get(self.node, {})
target = targcfg.get(
'hardwaremanagement.manager', {}).get('value', None)
target = targcfg.get('hardwaremanagement.manager', {}).get('value', None)
if not target:
target = self.node
target = target.split('/', 1)[0]
cv = util.TLSCertVerifier(
self.configmanager, self.node,
'pubkeys.tls_hardwaremanager').verify_cert
self.configmanager, self.node, 'pubkeys.tls_hardwaremanager'
).verify_cert
self._wc = wc.SecureHTTPConnection(target, port=443, verifycallback=cv)
return self._wc
def login(self, configmanager):
credcfg = configmanager.get_node_attributes(self.node,
['secret.hardwaremanagementuser',
'secret.hardwaremanagementpassword'],
decrypt=True)
credcfg = configmanager.get_node_attributes(
self.node,
['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword'],
decrypt=True,
)
credcfg = credcfg.get(self.node, {})
username = credcfg.get(
'secret.hardwaremanagementuser', {}).get('value', None)
passwd = credcfg.get(
'secret.hardwaremanagementpassword', {}).get('value', None)
username = credcfg.get('secret.hardwaremanagementuser', {}).get('value', None)
passwd = credcfg.get('secret.hardwaremanagementpassword', {}).get('value', None)
if not isinstance(username, str):
username = username.decode('utf8')
if not isinstance(passwd, str):
@@ -92,26 +96,32 @@ class GeistClient(object):
self.username = username
rsp = self.wc.grab_json_response(
'/api/auth/{0}'.format(username),
{'cmd': 'login', 'data': {'password': passwd}})
{'cmd': 'login', 'data': {'password': passwd}},
)
token = rsp['data']['token']
return token
def logout(self):
if self._token:
self.wc.grab_json_response('/api/auth/{0}'.format(self.username),
{'cmd': 'logout', 'token': self.token})
self.wc.grab_json_response(
'/api/auth/{0}'.format(self.username),
{'cmd': 'logout', 'token': self.token},
)
self._token = None
def get_outlet(self, outlet):
rsp = self.wc.grab_json_response('/api/dev')
rsp = rsp['data']
dbt = data_by_type(rsp)
if 't3hd' in dbt:
del dbt['t3hd']
if len(dbt) != 1:
raise Exception('Multiple PDUs not supported per pdu')
pdutype = list(dbt)[0]
outlet = dbt[pdutype]['outlet'][str(int(outlet) - 1)]
state = outlet['state'].split('2')[-1]
return state
@@ -125,12 +135,20 @@ class GeistClient(object):
raise Exception('Multiple PDUs per endpoint not supported')
pdu = dbt[list(dbt)[0]]['keyname']
outlet = int(outlet) - 1
rsp = self.wc.grab_json_response(
'/api/dev/{0}/outlet/{1}'.format(pdu, outlet),
{'cmd': 'control', 'token': self.token,
'data': {'action': state, 'delay': False}})
{
'cmd': 'control',
'token': self.token,
'data': {'action': state, 'delay': False},
},
)
def process_measurement(keyname, name, enttype, entname, measurement, readings, category):
def process_measurement(
keyname, name, enttype, entname, measurement, readings, category
):
if measurement['type'] == 'realPower':
if category not in ('all', 'power'):
return
@@ -147,6 +165,10 @@ def process_measurement(keyname, name, enttype, entname, measurement, readings,
if category not in ('all',):
return
readtype = 'Voltage'
elif measurement['type'] == 'current':
if category not in ('all',):
return
readtype = 'Current'
elif measurement['type'] == 'temperature':
readtype = 'Temperature'
elif measurement['type'] == 'dewpoint':
@@ -158,23 +180,35 @@ def process_measurement(keyname, name, enttype, entname, measurement, readings,
myname = entname + ' ' + readtype
if name != 'all' and simplify_name(myname) != name:
return
readings.append({
'name': myname,
'value': float(measurement['value']),
'units': measurement['units'],
'type': readtype.split()[-1]
})
readings.append(
{
'name': myname,
'value': float(measurement['value']),
'units': measurement['units'],
'type': readtype.split()[-1],
}
)
def process_measurements(name, category, measurements, enttype, readings):
for measure in util.natural_sort(list(measurements)):
measurement = measurements[measure]['measurement']
entname = measurements[measure]['name']
for measureid in measurement:
process_measurement(measure, name, enttype, entname, measurement[measureid], readings, category)
process_measurement(
measure,
name,
enttype,
entname,
measurement[measureid],
readings,
category,
)
_sensors_by_node = {}
def read_sensors(element, node, configmanager):
category, name = element[-2:]
justnames = False
@@ -192,10 +226,12 @@ def read_sensors(element, node, configmanager):
_sensors_by_node[node] = (adev, time.time() + 1)
sn = _sensors_by_node.get(node, None)
dbt = data_by_type(sn[0]['data'])
readings = []
for datatype in dbt:
for datatype in dbt:
datum = dbt[datatype]
process_measurements(name, category, datum['entity'], 'entity', readings)
if 'outlet' in datum:
process_measurements(name, category, datum['outlet'], 'outlet', readings)
if justnames:
@@ -204,25 +240,78 @@ def read_sensors(element, node, configmanager):
else:
yield msg.SensorReadings(readings, name=node)
def get_outlet(node, configmanager, element):
def get_outlet(element, node, configmanager):
gc = GeistClient(node, configmanager)
state = gc.get_outlet(element[-1])
return msg.PowerState(node=node, state=state)
def read_firmware(node, configmanager):
gc = GeistClient(node, configmanager)
adev = gc.wc.grab_json_response('/api/sys')
myversion = adev['data']['version']
yield msg.Firmware([{'PDU Firmware': {'version': myversion}}], node)
def read_inventory(element, node, configmanager):
_inventory = {}
inventory = {}
gc = GeistClient(node, configmanager)
adev = gc.wc.grab_json_response('/api/sys')
basedata = adev['data']
inventory['present'] = True
inventory['name'] = 'PDU'
for elem in basedata.items():
if (
elem[0] != 'component'
and elem[0] != 'locale'
and elem[0] != 'state'
and elem[0] != 'contact'
and elem[0] != 'appVersion'
and elem[0] != 'build'
and elem[0] != 'version'
and elem[0] != 'apiVersion'
):
temp = elem[0]
if elem[0] == 'serialNumber':
temp = 'Serial'
elif elem[0] == 'partNumber':
temp = 'P/N'
elif elem[0] == 'modelNumber':
temp = 'Lenovo P/N and Serial'
_inventory[temp] = elem[1]
elif elem[0] == 'component':
tempname = ''
for component in basedata['component'].items():
for item in component:
if type(item) == str:
tempname = item
else:
for entry in item.items():
temp = entry[0]
if temp == 'sn':
temp = 'Serial'
_inventory[tempname + ' ' + temp] = entry[1]
inventory['information'] = _inventory
yield msg.KeyValueData({'inventory': [inventory]}, node)
def retrieve(nodes, element, configmanager, inputdata):
if 'outlets' in element:
gp = greenpool.GreenPile(pdupool)
for node in nodes:
gp.spawn(get_outlet, node, configmanager, element)
gp.spawn(get_outlet, element, node, configmanager)
for res in gp:
yield res
return
elif element[0] == 'sensors':
gp = greenpool.GreenPile(pdupool)
@@ -239,11 +328,20 @@ def retrieve(nodes, element, configmanager, inputdata):
for rsp in gp:
for datum in rsp:
yield datum
elif '/'.join(element).startswith('inventory/hardware/all'):
gp = greenpool.GreenPile(pdupool)
for node in nodes:
gp.spawn(read_inventory, element, node, configmanager)
for rsp in gp:
for datum in rsp:
yield datum
else:
for node in nodes:
yield msg.ConfluentResourceUnavailable(node, 'Not implemented')
yield msg.ConfluentResourceUnavailable(node, 'Not implemented')
return
def update(nodes, element, configmanager, inputdata):
if 'outlets' not in element:
yield msg.ConfluentResourceUnavailable(node, 'Not implemented')

View File

@@ -659,7 +659,9 @@ class IpmiHandler(object):
elif self.element[1:4] == ['management_controller', 'extended', 'advanced']:
return self.handle_bmcconfig(True)
elif self.element[1:4] == ['management_controller', 'extended', 'extra']:
return self.handle_bmcconfig(True, extended=True)
return self.handle_bmcconfig(advanced=False, extended=True)
elif self.element[1:4] == ['management_controller', 'extended', 'extra_advanced']:
return self.handle_bmcconfig(advanced=True, extended=True)
elif self.element[1:3] == ['system', 'all']:
return self.handle_sysconfig()
elif self.element[1:3] == ['system', 'advanced']:
@@ -1376,10 +1378,8 @@ class IpmiHandler(object):
def identify(self):
if 'update' == self.op:
identifystate = self.inputdata.inputbynode[self.node] == 'on'
if self.inputdata.inputbynode[self.node] == 'blink':
raise exc.InvalidArgumentException(
'"blink" is not supported with ipmi')
self.ipmicmd.set_identify(on=identifystate)
blinkstate = self.inputdata.inputbynode[self.node] == 'blink'
self.ipmicmd.set_identify(on=identifystate, blink=blinkstate)
self.output.put(msg.IdentifyState(
node=self.node, state=self.inputdata.inputbynode[self.node]))
return
@@ -1472,7 +1472,8 @@ class IpmiHandler(object):
if 'read' == self.op:
try:
if extended:
bmccfg = self.ipmicmd.get_extended_bmc_configuration()
bmccfg = self.ipmicmd.get_extended_bmc_configuration(
hideadvanced=(not advanced))
else:
bmccfg = self.ipmicmd.get_bmc_configuration()
self.output.put(msg.ConfigSet(self.node, bmccfg))

View File

@@ -516,12 +516,20 @@ class IpmiHandler(object):
return self.handle_ntp()
elif self.element[1:4] == ['management_controller', 'extended', 'all']:
return self.handle_bmcconfig()
elif self.element[1:4] == ['management_controller', 'extended', 'advanced']:
return self.handle_bmcconfig(True)
elif self.element[1:4] == ['management_controller', 'extended', 'extra']:
return self.handle_bmcconfig(advanced=False, extended=True)
elif self.element[1:4] == ['management_controller', 'extended', 'extra_advanced']:
return self.handle_bmcconfig(advanced=True, extended=True)
elif self.element[1:3] == ['system', 'all']:
return self.handle_sysconfig()
elif self.element[1:3] == ['system', 'advanced']:
return self.handle_sysconfig(True)
elif self.element[1:3] == ['system', 'clear']:
return self.handle_sysconfigclear()
elif self.element[1:3] == ['management_controller', 'clear']:
return self.handle_bmcconfigclear()
elif self.element[1:3] == ['management_controller', 'licenses']:
return self.handle_licenses()
elif self.element[1:3] == ['management_controller', 'save_licenses']:
@@ -1310,12 +1318,15 @@ class IpmiHandler(object):
if 'read' == self.op:
lc = self.ipmicmd.get_location_information()
def handle_bmcconfig(self, advanced=False):
def handle_bmcconfig(self, advanced=False, extended=False):
if 'read' == self.op:
try:
self.output.put(msg.ConfigSet(
self.node,
self.ipmicmd.get_bmc_configuration()))
if extended:
bmccfg = self.ipmicmd.get_extended_bmc_configuration(
hideadvanced=(not advanced))
else:
bmccfg = self.ipmicmd.get_bmc_configuration()
self.output.put(msg.ConfigSet(self.node, bmccfg))
except Exception as e:
self.output.put(
msg.ConfluentNodeError(self.node, str(e)))
@@ -1323,6 +1334,12 @@ class IpmiHandler(object):
self.ipmicmd.set_bmc_configuration(
self.inputdata.get_attributes(self.node))
def handle_bmcconfigclear(self):
if 'read' == self.op:
raise exc.InvalidArgumentException(
'Cannot read the "clear" resource')
self.ipmicmd.clear_bmc_configuration()
def handle_sysconfigclear(self):
if 'read' == self.op:
raise exc.InvalidArgumentException(

View File

@@ -43,7 +43,6 @@ if cryptography and cryptography.__version__.split('.') < ['1', '5']:
paramiko.transport.Transport._preferred_keys)
class HostKeyHandler(paramiko.client.MissingHostKeyPolicy):
def __init__(self, configmanager, node):
@@ -112,7 +111,7 @@ class SshShell(conapi.Console):
# that would rather not use the nodename as anything but an opaque
# identifier
self.datacallback = callback
if self.username is not b'':
if self.username != b'':
self.logon()
else:
self.inputmode = 0
@@ -259,6 +258,115 @@ class SshShell(conapi.Console):
self.ssh.close()
self.datacallback = None
def create(nodes, element, configmanager, inputdata):
if len(nodes) == 1:
return SshShell(nodes[0], configmanager)
class SshConn():
def __init__(self, node, config, username=b'', password=b''):
self.node = node
self.ssh = None
self.datacallback = None
self.nodeconfig = config
self.username = username
self.password = password
self.connected = False
self.inputmode = 0 # 0 = username, 1 = password...
def __del__(self):
if self.connected:
self.close()
def do_logon(self):
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(
HostKeyHandler(self.nodeconfig, self.node))
log.log({'info': f"Connecting to {self.node} by ssh"})
try:
if self.password:
self.ssh.connect(self.node, username=self.username,
password=self.password, allow_agent=False,
look_for_keys=False)
else:
self.ssh.connect(self.node, username=self.username)
except paramiko.AuthenticationException as e:
self.ssh.close()
self.inputmode = 0
self.username = b''
self.password = b''
log.log({'warn': f"Error connecting to {self.node}: {str(e)}"})
return
except paramiko.ssh_exception.NoValidConnectionsError as e:
self.ssh.close()
self.inputmode = 0
self.username = b''
self.password = b''
log.log({'warn': f"Error connecting to {self.node}: {str(e)}"})
return
except cexc.PubkeyInvalid as pi:
self.ssh.close()
self.keyaction = b''
self.candidatefprint = pi.fingerprint
log.log({'warn': pi.message})
self.keyattrname = pi.attrname
log.log({'info': f"New fingerprint: {pi.fingerprint}"})
self.inputmode = -1
return
except paramiko.SSHException as pi:
self.ssh.close()
self.inputmode = -2
warn = str(pi)
if warnhostkey:
warn += ' (Older cryptography package on this host only ' \
'works with ed25519, check ssh startup on target ' \
'and permissions on /etc/ssh/*key)\r\n'
log.log({'warn': warn})
return
except Exception as e:
self.ssh.close()
self.ssh.close()
self.inputmode = 0
self.username = b''
self.password = b''
log.log({'warn': f"Error connecting to {self.node}: {str(e)}"})
return
self.inputmode = 2
self.connected = True
log.log({'info': f"Connected by ssh to {self.node}"})
def exec_command(self, cmd, cmdargs):
safecmd = cmd.translate(str.maketrans({"[": r"\]",
"]": r"\]",
"?": r"\?",
"!": r"\!",
"\\": r"\\",
"^": r"\^",
"$": r"\$",
" ": r"\ ",
"*": r"\*"}))
cmds = [safecmd]
for arg in cmdargs:
arg = arg.translate(str.maketrans({"[": r"\]",
"]": r"\]",
"?": r"\?",
"!": r"\!",
"\\": r"\\",
"^": r"\^",
"$": r"\$",
" ": r"\ ",
"*": r"\*"}))
arg = "%s" % (str(arg).replace(r"'", r"'\''"),)
cmds.append(arg)
runcmd = " ".join(cmds)
stdin, stdout, stderr = self.ssh.exec_command(runcmd)
rcode = stdout.channel.recv_exit_status()
return stdout.readlines(), stderr.readlines()
def close(self):
if self.ssh is not None:
self.ssh.close()
log.log({'info': f"Disconnected from {self.node}"})

View File

@@ -282,7 +282,7 @@ def handle_request(env, start_response):
ifidx = int(nici.read())
ncfg = netutil.get_nic_config(cfg, nodename, ifidx=ifidx)
else:
ncfg = netutil.get_nic_config(cfg, nodename, serverip=myip)
ncfg = netutil.get_nic_config(cfg, nodename, serverip=myip, clientip=clientip)
if env['PATH_INFO'] == '/self/deploycfg':
for key in list(ncfg):
if 'v6' in key:

View File

@@ -70,15 +70,17 @@ try:
# so we need to ffi that in using a strategy compatible with PyOpenSSL
import OpenSSL.SSL as libssln
import OpenSSL.crypto as crypto
from OpenSSL._util import ffi
except ImportError:
libssl = None
ffi = None
crypto = None
plainsocket = None
libc = ctypes.CDLL(ctypes.util.find_library('c'))
libsslc = ctypes.CDLL(ctypes.util.find_library('ssl'))
libsslc.SSL_CTX_set_cert_verify_callback.argtypes = [
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
def _should_authlog(path, operation):
if (operation == 'retrieve' and
@@ -389,11 +391,24 @@ def _tlshandler(bind_host, bind_port):
else:
eventlet.spawn_n(_tlsstartup, cnn)
@ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)
def verify_stub(store, misc):
return 1
class PyObject_HEAD(ctypes.Structure):
_fields_ = [
("ob_refcnt", ctypes.c_ssize_t),
("ob_type", ctypes.c_void_p),
]
# see main/Modules/_ssl.c, only caring about the SSL_CTX pointer
class PySSLContext(ctypes.Structure):
_fields_ = [
("ob_base", PyObject_HEAD),
("ctx", ctypes.c_void_p),
]
if ffi:
@ffi.callback("int(*)( X509_STORE_CTX *, void*)")
def verify_stub(store, misc):
return 1
def _tlsstartup(cnn):
@@ -416,8 +431,8 @@ def _tlsstartup(cnn):
ctx.use_certificate_file('/etc/confluent/srvcert.pem')
ctx.use_privatekey_file('/etc/confluent/privkey.pem')
ctx.set_verify(libssln.VERIFY_PEER, lambda *args: True)
libssln._lib.SSL_CTX_set_cert_verify_callback(ctx._context,
verify_stub, ffi.NULL)
ssl_ctx = PySSLContext.from_address(id(ctx._context)).ctx
libsslc.SSL_CTX_set_cert_verify_callback(ssl_ctx, verify_stub, 0)
cnn = libssl.Connection(ctx, cnn)
cnn.set_accept_state()
cnn.do_handshake()

View File

@@ -213,15 +213,18 @@ def initialize_root_key(generate, automation=False):
suffix = 'automationpubkey'
else:
suffix = 'rootpubkey'
keyname = '/var/lib/confluent/public/site/ssh/{0}.{1}'.format(
myname, suffix)
if authorized:
with open(keyname, 'w'):
pass
for auth in authorized:
shutil.copy(
auth,
'/var/lib/confluent/public/site/ssh/{0}.{1}'.format(
myname, suffix))
os.chmod('/var/lib/confluent/public/site/ssh/{0}.{1}'.format(
myname, suffix), 0o644)
os.chown('/var/lib/confluent/public/site/ssh/{0}.{1}'.format(
myname, suffix), neededuid, -1)
with open(auth, 'r') as local_key:
with open(keyname, 'a') as dest:
dest.write(local_key.read())
if os.path.exists(keyname):
os.chmod(keyname, 0o644)
os.chown(keyname, neededuid, -1)
if alreadyexist:
raise AlreadyExists()

View File

@@ -0,0 +1,217 @@
import confluent.auth as auth
import eventlet
import confluent.messages as msg
import confluent.exceptions as exc
import confluent.util as util
import confluent.config.configmanager as configmanager
import struct
import eventlet.green.socket as socket
import eventlet.green.subprocess as subprocess
import base64
import os
import pwd
import confluent.httpapi as httpapi
mountsbyuser = {}
_vinzfd = None
_vinztoken = None
webclient = eventlet.import_patched('pyghmi.util.webclient')
# Handle the vinz VNC session
def assure_vinz():
global _vinzfd
global _vinztoken
if _vinzfd is None:
_vinztoken = base64.b64encode(os.urandom(33), altchars=b'_-').decode()
os.environ['VINZ_TOKEN'] = _vinztoken
os.makedirs('/var/run/confluent/vinz/sessions', exist_ok=True)
_vinzfd = subprocess.Popen(
['/opt/confluent/bin/vinz',
'-c', '/var/run/confluent/vinz/control',
'-w', '127.0.0.1:4007',
'-a', '/var/run/confluent/vinz/approval',
# vinz supports unix domain websocket, however apache reverse proxy is dicey that way in some versions
'-d', '/var/run/confluent/vinz/sessions'])
while not os.path.exists('/var/run/confluent/vinz/control'):
eventlet.sleep(0.5)
eventlet.spawn(monitor_requests)
_unix_by_nodename = {}
def get_url(nodename, inputdata):
method = inputdata.inputbynode[nodename]
assure_vinz()
if method == 'wss':
return f'/vinz/kvmsession/{nodename}'
elif method == 'unix':
if nodename not in _unix_by_nodename or not os.path.exists(_unix_by_nodename[nodename]):
_unix_by_nodename[nodename] = request_session(nodename)
return _unix_by_nodename[nodename]
_usersessions = {}
def close_session(sessionid):
sessioninfo = _usersessions.get(sessionid, None)
if not sessioninfo:
return
del _usersessions[sessionid]
nodename = sessioninfo['nodename']
wc = sessioninfo['webclient']
cfg = configmanager.ConfigManager(None)
c = cfg.get_node_attributes(
nodename,
['secret.hardwaremanagementuser',
'secret.hardwaremanagementpassword',
], decrypt=True)
bmcuser = c.get(nodename, {}).get(
'secret.hardwaremanagementuser', {}).get('value', None)
bmcpass = c.get(nodename, {}).get(
'secret.hardwaremanagementpassword', {}).get('value', None)
if not isinstance(bmcuser, str):
bmcuser = bmcuser.decode()
if not isinstance(bmcpass, str):
bmcpass = bmcpass.decode()
if bmcuser and bmcpass:
wc.grab_json_response_with_status(
'/logout', {'data': [bmcuser, bmcpass]},
headers={
'Content-Type': 'application/json',
'Accept': 'application/json',
'X-XSRF-TOKEN': wc.cookies['XSRF-TOKEN']})
def send_grant(conn, nodename):
cfg = configmanager.ConfigManager(None)
c = cfg.get_node_attributes(
nodename,
['secret.hardwaremanagementuser',
'secret.hardwaremanagementpassword',
'hardwaremanagement.manager'], decrypt=True)
bmcuser = c.get(nodename, {}).get(
'secret.hardwaremanagementuser', {}).get('value', None)
bmcpass = c.get(nodename, {}).get(
'secret.hardwaremanagementpassword', {}).get('value', None)
bmc = c.get(nodename, {}).get(
'hardwaremanagement.manager', {}).get('value', None)
if bmcuser and bmcpass and bmc:
kv = util.TLSCertVerifier(cfg, nodename,
'pubkeys.tls_hardwaremanager').verify_cert
wc = webclient.SecureHTTPConnection(bmc, 443, verifycallback=kv)
if not isinstance(bmcuser, str):
bmcuser = bmcuser.decode()
if not isinstance(bmcpass, str):
bmcpass = bmcpass.decode()
rsp = wc.grab_json_response_with_status(
'/login', {'data': [bmcuser, bmcpass]},
headers={'Content-Type': 'application/json',
'Accept': 'application/json'})
sessionid = wc.cookies['SESSION']
sessiontok = wc.cookies['XSRF-TOKEN']
_usersessions[sessionid] = {
'webclient': wc,
'nodename': nodename,
}
url = '/kvm/0'
fprintinfo = cfg.get_node_attributes(nodename, 'pubkeys.tls_hardwaremanager')
fprint = fprintinfo.get(
nodename, {}).get('pubkeys.tls_hardwaremanager', {}).get('value', None)
if not fprint:
return
fprint = fprint.split('$', 1)[1]
fprint = bytes.fromhex(fprint)
conn.send(struct.pack('!BI', 1, len(bmc)))
conn.send(bmc.encode())
conn.send(struct.pack('!I', len(sessionid)))
conn.send(sessionid.encode())
conn.send(struct.pack('!I', len(sessiontok)))
conn.send(sessiontok.encode())
conn.send(struct.pack('!I', len(fprint)))
conn.send(fprint)
conn.send(struct.pack('!I', len(url)))
conn.send(url.encode())
conn.send(b'\xff')
def evaluate_request(conn):
allow = False
authname = None
try:
creds = conn.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED,
struct.calcsize('iII'))
pid, uid, gid = struct.unpack('iII', creds)
if uid != os.getuid():
return
rqcode, fieldlen = struct.unpack('!BI', conn.recv(5))
authtoken = conn.recv(fieldlen).decode()
if authtoken != _vinztoken:
return
if rqcode == 2: # disconnect notification
fieldlen = struct.unpack('!I', conn.recv(4))[0]
sessionid = conn.recv(fieldlen).decode()
close_session(sessionid)
conn.recv(1) # digest 0xff
if rqcode == 1: # request for new connection
fieldlen = struct.unpack('!I', conn.recv(4))[0]
nodename = conn.recv(fieldlen).decode()
idtype = struct.unpack('!B', conn.recv(1))[0]
if idtype == 1:
usernum = struct.unpack('!I', conn.recv(4))[0]
if usernum == 0: # root is a special guy
send_grant(conn, nodename)
return
try:
authname = pwd.getpwuid(usernum).pw_name
except Exception:
return
elif idtype == 2:
fieldlen = struct.unpack('!I', conn.recv(4))[0]
sessionid = conn.recv(fieldlen)
fieldlen = struct.unpack('!I', conn.recv(4))[0]
sessiontok = conn.recv(fieldlen)
try:
authname = httpapi.get_user_for_session(sessionid, sessiontok)
except Exception:
return
else:
return
conn.recv(1) # should be 0xff
if authname:
allow = auth.authorize(authname, f'/nodes/{nodename}/console/ikvm')
if allow:
send_grant(conn, nodename)
finally:
conn.close()
def monitor_requests():
a = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.remove('/var/run/confluent/vinz/approval')
except Exception:
pass
a.bind('/var/run/confluent/vinz/approval')
os.chmod('/var/run/confluent/vinz/approval', 0o600)
a.listen(8)
while True:
conn, addr = a.accept()
eventlet.spawn_n(evaluate_request, conn)
def request_session(nodename):
assure_vinz()
a = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
a.connect('/var/run/confluent/vinz/control')
nodename = nodename.encode()
a.send(struct.pack('!BI', 1, len(nodename)))
a.send(nodename)
a.send(b'\xff')
rsp = a.recv(1)
retcode = struct.unpack('!B', rsp)[0]
if retcode != 1:
raise Exception("Bad return code")
rsp = a.recv(4)
nlen = struct.unpack('!I', rsp)[0]
sockname = a.recv(nlen).decode('utf8')
retcode = a.recv(1)
if retcode != b'\xff':
raise Exception("Unrecognized response")
return os.path.join('/var/run/confluent/vinz/sessions', sockname)

View File

@@ -1,12 +1,16 @@
%define name confluent_server
%define version #VERSION#
%define fversion %{lua:
sv, _ = string.gsub("#VERSION#", "[~+]", "-")
print(sv)
}
%define release 1
Summary: confluent systems management server
Name: %{name}
Version: %{version}
Release: %{release}
Source0: %{name}-%{version}.tar.gz
Source0: %{name}-%{fversion}.tar.gz
License: Apache2
Group: Development/Libraries
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
@@ -14,6 +18,7 @@ Prefix: %{_prefix}
BuildArch: noarch
Requires: confluent_vtbufferd
%if "%{dist}" == ".el7"
Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-webauthn-rp, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic
%else
%if "%{dist}" == ".el8"
@@ -23,6 +28,7 @@ Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-webauthn-rp, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
%else
Requires: python3-dbm,python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodome >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-webauthn-rp, python3-dnspython, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-PyYAML openssl iproute
%endif
%endif
%endif
@@ -33,7 +39,7 @@ Url: https://github.com/lenovo/confluent
Server for console management and systems management aggregation
%prep
%setup -n %{name}-%{version} -n %{name}-%{version}
%setup -n %{name}-%{fversion}
%build
%if "%{dist}" == ".el7"

View File

@@ -2,7 +2,11 @@ cd `dirname $0`
VERSION=`git describe|cut -d- -f 1`
NUMCOMMITS=`git describe|cut -d- -f 2`
if [ "$NUMCOMMITS" != "$VERSION" ]; then
VERSION=$VERSION.dev$NUMCOMMITS+g`git describe|cut -d- -f 3`
LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
LASTNUM=$((LASTNUM+1))
FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
VERSION=${FIRSTPART}.${LASTNUM}
VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
fi
echo $VERSION > VERSION
sed -e "s/#VERSION#/$VERSION/" setup.py.tmpl > setup.py

View File

@@ -8,7 +8,11 @@ DSCARGS="--with-python3=True --with-python2=False"
VERSION=`git describe|cut -d- -f 1`
NUMCOMMITS=`git describe|cut -d- -f 2`
if [ "$NUMCOMMITS" != "$VERSION" ]; then
VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3`
LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
LASTNUM=$((LASTNUM+1))
FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
VERSION=${FIRSTPART}.${LASTNUM}
VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
fi
cd ..
rm -rf /tmp/confluent

View File

@@ -1,7 +1,11 @@
VERSION=`git describe|cut -d- -f 1`
NUMCOMMITS=`git describe|cut -d- -f 2`
if [ "$NUMCOMMITS" != "$VERSION" ]; then
VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3`
LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
LASTNUM=$((LASTNUM+1))
FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
VERSION=${FIRSTPART}.${LASTNUM}
VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
fi
mkdir -p dist/confluent_vtbufferd-$VERSION
cp ../LICENSE NOTICE *.c *.h Makefile dist/confluent_vtbufferd-$VERSION

28
imgutil/builddeb Executable file
View File

@@ -0,0 +1,28 @@
#!/bin/bash
VERSION=`git describe|cut -d- -f 1`
NUMCOMMITS=`git describe|cut -d- -f 2`
if [ "$NUMCOMMITS" != "$VERSION" ]; then
LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
LASTNUM=$((LASTNUM+1))
FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
VERSION=${FIRSTPART}.${LASTNUM}
VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
fi
mkdir -p /tmp/confluent-imgutil
cp -a * /tmp/confluent-imgutil
cp ../LICENSE /tmp/confluent-imgutil
cd /tmp/confluent-imgutil
rm -rf deb/confluent_imgutil_$VERSION/
mkdir -p deb/confluent_imgutil_$VERSION/DEBIAN/
mkdir -p deb/confluent_imgutil_$VERSION/opt/confluent/lib/imgutil
mkdir -p deb/confluent_imgutil_$VERSION/opt/confluent/bin
mv imgutil deb/confluent_imgutil_$VERSION/opt/confluent/bin/
chmod a+x deb/confluent_imgutil_$VERSION/opt/confluent/bin/imgutil
mv ubuntu* suse15 el7 el9 el8 deb/confluent_imgutil_$VERSION/opt/confluent/lib/imgutil/
mkdir -p deb/confluent_imgutil_$VERSION/opt/confluent/share/licenses/confluent_imgutil
cp LICENSE deb/confluent_imgutil_$VERSION/opt/confluent/share/licenses/confluent_imgutil
sed -e 's/#VERSION#/'$VERSION/ control.tmpl > deb/confluent_imgutil_$VERSION/DEBIAN/control
dpkg-deb --build deb/confluent_imgutil_$VERSION
if [ ! -z "$1" ]; then
mv deb/confluent_imgutil_$VERSION.deb $1
fi

View File

@@ -2,7 +2,11 @@
VERSION=`git describe|cut -d- -f 1`
NUMCOMMITS=`git describe|cut -d- -f 2`
if [ "$NUMCOMMITS" != "$VERSION" ]; then
VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3`
LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
LASTNUM=$((LASTNUM+1))
FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
VERSION=${FIRSTPART}.${LASTNUM}
VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
fi
sed -e "s/#VERSION#/$VERSION/" confluent_imgutil.spec.tmpl > confluent_imgutil.spec
cp ../LICENSE .

View File

@@ -8,13 +8,13 @@ Source: confluent_imgutil.tar.xz
BuildArch: noarch
BuildRoot: /tmp/
%if "%{dist}" == ".el8"
Requires: squashfs-tools
Requires: squashfs-tools cpio
%else
%if "%{dist}" == ".el9"
Requires: squashfs-tools
Requires: squashfs-tools cpio
%else
%if "%{dist}" == ".el7"
Requires: squashfs-tools
Requires: squashfs-tools cpio
%else
Requires: squashfs
%endif

9
imgutil/control.tmpl Normal file
View File

@@ -0,0 +1,9 @@
Package: confluent-imgutil
Version: #VERSION#
Section: base
Priority: optional
Maintainer: Jarrod Johnson <jjohnson2@lenovo.com>
Description: Web frontend for confluent server
Architecture: all
Depends: debootstrap

View File

@@ -655,10 +655,27 @@ class DebHandler(OsHandler):
def prep_root(self, args):
shutil.copy('/etc/apt/sources.list', os.path.join(self.targpath, 'etc/apt/sources.list'))
for listfile in glob.glob('/etc/apt/sources.list.d/*'):
shutil.copy(listfile, os.path.join(self.targpath, listfile[1:]))
args.cmd = ['apt-get', 'update']
run_constrainedx(fancy_chroot, (args, self.targpath))
args.cmd = ['apt-get', '-y', 'install'] + self.includepkgs
run_constrainedx(fancy_chroot, (args, self.targpath))
servicefile = os.path.join(
self.targpath, 'usr/lib/systemd/system/ssh.service')
if os.path.exists(servicefile):
targfile = os.path.join(
self.targpath,
'etc/systemd/system/multi-user.target.wants/ssh.service')
if not os.path.exists(targfile):
os.symlink('/usr/lib/systemd/system/ssh.service', targfile)
else:
targfile = os.path.join(
self.targpath,
'etc/systemd/system/multi-user.target.wants/sshd.service')
if not os.path.exists(targfile):
os.symlink('/usr/lib/systemd/system/sshd.service', targfile)
class ElHandler(OsHandler):
@@ -934,6 +951,8 @@ def fancy_chroot(args, installroot):
sourceresolv = '/etc/resolv.conf'
if os.path.islink(sourceresolv):
sourceresolv = os.readlink(sourceresolv)
# normalize and resolve relative and absolute paths
sourceresolv = os.path.normpath(os.path.join('/etc', sourceresolv))
dstresolv = os.path.join(installroot, 'etc/resolv.conf')
if os.path.islink(dstresolv):
dstresolv = os.path.join(installroot, os.readlink(dstresolv)[1:])
@@ -944,8 +963,7 @@ def fancy_chroot(args, installroot):
_mount('none', dstresolv, flags=MS_RDONLY|MS_REMOUNT|MS_BIND)
os.chroot(installroot)
os.chdir('/')
_mount('/', '/', flags=MS_BIND) # Make / manifest as a mounted filesystem in exec
os.environ['PS1'] = '[\x1b[1m\x1b[4mIMGUTIL EXEC {0}\x1b[0m \W]$ '.format(imgname)
os.environ['PS1'] = '[\x1b[1m\x1b[4mIMGUTIL EXEC {0}\x1b[0m \\W]$ '.format(imgname)
os.environ['CONFLUENT_IMGUTIL_MODE'] = 'exec'
if oshandler:
oshandler.set_source('/run/confluentdistro')
@@ -985,7 +1003,13 @@ def build_root_backend(optargs):
def _mount_constrained_fs(args, installroot):
# This is prepping for a chroot.
# For the target environment to be content with having a root
# filesystem, installroot must be a 'mount' entry of it's own,
# so bind mount to itself to satisfy
_mount(installroot, installroot, flags=MS_BIND)
_mount('/dev', os.path.join(installroot, 'dev'), flags=MS_BIND|MS_RDONLY)
_mount('/dev/pts', os.path.join(installroot, 'dev/pts'), flags=MS_BIND|MS_RDONLY)
_mount('proc', os.path.join(installroot, 'proc'), fstype='proc')
_mount('sys', os.path.join(installroot, 'sys'), fstype='sysfs')
_mount('runfs', os.path.join(installroot, 'run'), fstype='tmpfs')

1
imgutil/ubuntu24.04 Symbolic link
View File

@@ -0,0 +1 @@
ubuntu