
Merge branch 'master' into async

Jarrod Johnson
2024-08-08 09:45:15 -04:00
27 changed files with 458 additions and 70 deletions

View File

@@ -90,8 +90,14 @@ touch /tmp/cryptpkglist
touch /tmp/pkglist
touch /tmp/addonpackages
if [ "$cryptboot" == "tpm2" ]; then
LUKSPARTY="--encrypted --passphrase=$(cat /etc/confluent/confluent.apikey)"
echo $cryptboot >> /tmp/cryptboot
lukspass=$(python3 /opt/confluent/bin/apiclient /confluent-api/self/profileprivate/pending/luks.key 2> /dev/null)
if [ -z "$lukspass" ]; then
lukspass=$(python3 -c 'import os;import base64;print(base64.b64encode(os.urandom(66)).decode())')
fi
echo $lukspass > /etc/confluent/luks.key
chmod 000 /etc/confluent/luks.key
LUKSPARTY="--encrypted --passphrase=$lukspass"
echo $cryptboot >> /tmp/cryptboot
echo clevis-dracut >> /tmp/cryptpkglist
fi
@@ -114,7 +120,7 @@ confluentpython /etc/confluent/apiclient /confluent-public/os/$confluent_profile
grep '^%include /tmp/partitioning' /tmp/kickstart.* > /dev/null || rm /tmp/installdisk
if [ -e /tmp/installdisk -a ! -e /tmp/partitioning ]; then
INSTALLDISK=$(cat /tmp/installdisk)
sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e s/%%LUKSHOOK%%/$LUKSPARTY/ /tmp/partitioning.template > /tmp/partitioning
sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e "s!%%LUKSHOOK%%!$LUKSPARTY!" /tmp/partitioning.template > /tmp/partitioning
vgchange -a n >& /dev/null
wipefs -a -f /dev/$INSTALLDISK >& /dev/null
fi

View File

@@ -1,4 +1,5 @@
#!/bin/sh
cryptdisk=$(blkid -t TYPE="crypto_LUKS"|sed -e s/:.*//)
clevis luks bind -f -d $cryptdisk -k - tpm2 '{}' < /etc/confluent/confluent.apikey
cryptsetup luksRemoveKey $cryptdisk < /etc/confluent/confluent.apikey
clevis luks bind -f -d $cryptdisk -k - tpm2 '{}' < /etc/confluent/luks.key
chmod 000 /etc/confluent/luks.key
#cryptsetup luksRemoveKey $cryptdisk < /etc/confluent/confluent.apikey

View File

@@ -58,6 +58,10 @@ if ! grep console= /proc/cmdline > /dev/null; then
echo "Automatic console configured for $autocons"
fi
echo sshd:x:30:30:SSH User:/var/empty/sshd:/sbin/nologin >> /etc/passwd
modprobe ib_ipoib
modprobe ib_umad
modprobe hfi1
modprobe mlx5_ib
cd /sys/class/net
for nic in *; do
ip link set $nic up

View File

@@ -8,7 +8,7 @@ for addr in $(grep ^MANAGER: /etc/confluent/confluent.info|awk '{print $2}'|sed
fi
done
mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay
if grep confluennt_imagemethtod=untethered /proc/cmdline > /dev/null; then
if grep confluent_imagemethod=untethered /proc/cmdline > /dev/null; then
mount -t tmpfs untethered /mnt/remoteimg
curl https://$confluent_mgr/confluent-public/os/$confluent_profile/rootimg.sfs -o /mnt/remoteimg/rootimg.sfs
else

View File

@@ -0,0 +1,12 @@
import yaml
import os

ainst = {}
with open('/autoinstall.yaml', 'r') as allin:
    ainst = yaml.safe_load(allin)
ainst['storage']['layout']['password'] = os.environ['lukspass']
with open('/autoinstall.yaml', 'w') as allout:
    yaml.safe_dump(ainst, allout)
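
For context, a minimal sketch of what this hook does to a subiquity autoinstall document (not part of the commit; the lvm layout and the passphrase value are assumptions for illustration, and lukspass is normally exported by the calling pre script):

import os
import yaml

os.environ['lukspass'] = 'example-passphrase'     # stand-in for the exported value
ainst = {'storage': {'layout': {'name': 'lvm'}}}  # assumed starting layout
ainst['storage']['layout']['password'] = os.environ['lukspass']
print(yaml.safe_dump(ainst))
# storage:
#   layout:
#     name: lvm
#     password: example-passphrase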

View File

@@ -0,0 +1,26 @@
#!/usr/bin/python3
import yaml
import os

ainst = {}
with open('/autoinstall.yaml', 'r') as allin:
    ainst = yaml.safe_load(allin)
tz = None
ntps = []
with open('/etc/confluent/confluent.deploycfg', 'r') as confluentdeploycfg:
    dcfg = yaml.safe_load(confluentdeploycfg)
    tz = dcfg['timezone']
    ntps = dcfg.get('ntpservers', [])
if ntps and not ainst.get('ntp', None):
    ainst['ntp'] = {}
    ainst['ntp']['enabled'] = True
    ainst['ntp']['servers'] = ntps
if tz and not ainst.get('timezone'):
    ainst['timezone'] = tz
with open('/autoinstall.yaml', 'w') as allout:
    yaml.safe_dump(ainst, allout)

View File

@@ -60,10 +60,12 @@ cp /custom-installation/confluent/bin/apiclient /target/opt/confluent/bin
mount -o bind /dev /target/dev
mount -o bind /proc /target/proc
mount -o bind /sys /target/sys
mount -o bind /run /target/run
mount -o bind /sys/firmware/efi/efivars /target/sys/firmware/efi/efivars
if [ 1 = $updategrub ]; then
chroot /target update-grub
fi
echo "Port 22" >> /etc/ssh/sshd_config
echo "Port 2222" >> /etc/ssh/sshd_config
echo "Match LocalPort 22" >> /etc/ssh/sshd_config
@@ -88,8 +90,36 @@ chroot /target bash -c "source /etc/confluent/functions; run_remote_parts post.d
source /target/etc/confluent/functions
run_remote_config post
if [ -f /etc/confluent_lukspass ]; then
numdevs=$(lsblk -lo name,uuid|grep $(awk '{print $2}' < /target/etc/crypttab |sed -e s/UUID=//)|wc -l)
if [ 0$numdevs -ne 1 ]; then
wall "Unable to identify the LUKS device, halting install"
while :; do sleep 86400; done
fi
CRYPTTAB_SOURCE=$(awk '{print $2}' /target/etc/crypttab)
. /target/usr/lib/cryptsetup/functions
crypttab_resolve_source
if [ ! -e $CRYPTTAB_SOURCE ]; then
wall "Unable to find $CRYPTTAB_SOURCE, halting install"
while :; do sleep 86400; done
fi
cp /etc/confluent_lukspass /target/etc/confluent/luks.key
chmod 000 /target/etc/confluent/luks.key
lukspass=$(cat /etc/confluent_lukspass)
chroot /target apt install libtss2-rc0
PASSWORD=$lukspass chroot /target systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs="" $CRYPTTAB_SOURCE
fetch_remote systemdecrypt
mv systemdecrypt /target/etc/initramfs-tools/scripts/local-top/systemdecrypt
fetch_remote systemdecrypt-hook
mv systemdecrypt-hook /target/etc/initramfs-tools/hooks/systemdecrypt
chmod 755 /target/etc/initramfs-tools/scripts/local-top/systemdecrypt /target/etc/initramfs-tools/hooks/systemdecrypt
chroot /target update-initramfs -u
fi
python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: staged'
umount /target/sys /target/dev /target/proc
umount /target/sys /target/dev /target/proc /target/run
) &
tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console

View File

@@ -13,11 +13,6 @@ exec 2>> /var/log/confluent/confluent-pre.log
chmod 600 /var/log/confluent/confluent-pre.log
cryptboot=$(grep encryptboot: $deploycfg|sed -e 's/^encryptboot: //')
if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then
echo "****Encrypted boot requested, but not implemented for this OS, halting install" > /dev/console
[ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but not implemented for this OS,halting install" >> $(cat /tmp/autoconsdev))
while :; do sleep 86400; done
fi
cat /custom-installation/ssh/*pubkey > /root/.ssh/authorized_keys
@@ -45,6 +40,24 @@ if [ ! -e /tmp/installdisk ]; then
python3 /custom-installation/getinstalldisk
fi
sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml
run_remote_python mergetime
if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then
lukspass=$(python3 /opt/confluent/bin/apiclient /confluent-api/self/profileprivate/pending/luks.key 2> /dev/null)
if [ -z "$lukspass" ]; then
lukspass=$(head -c 66 < /dev/urandom |base64 -w0)
fi
export lukspass
run_remote_python addcrypt
if ! grep 'password:' /autoinstall.yaml > /dev/null; then
echo "****Encrypted boot requested, but the user-data does not have a hook to enable,halting install" > /dev/console
[ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but the user-data does not have a hook to enable,halting install" >> $(cat /tmp/autoconsdev))
while :; do sleep 86400; done
fi
sed -i s!%%CRYPTPASS%%!$lukspass! /autoinstall.yaml
sed -i s!'#CRYPTBOOT'!! /autoinstall.yaml
echo -n $lukspass > /etc/confluent_lukspass
chmod 000 /etc/confluent_lukspass
fi
) &
tail --pid $! -n 0 -F /var/log/confluent/confluent-pre.log > /dev/console

View File

@@ -0,0 +1,17 @@
#!/bin/sh
case $1 in
prereqs)
    echo
    exit 0
    ;;
esac

systemdecryptnow() {
    . /usr/lib/cryptsetup/functions
    local CRYPTTAB_SOURCE=$(awk '{print $2}' /systemdecrypt/crypttab)
    local CRYPTTAB_NAME=$(awk '{print $1}' /systemdecrypt/crypttab)
    crypttab_resolve_source
    /lib/systemd/systemd-cryptsetup attach "${CRYPTTAB_NAME}" "${CRYPTTAB_SOURCE}" none tpm2-device=auto
}

systemdecryptnow

View File

@@ -0,0 +1,26 @@
#!/bin/sh
case "$1" in
prereqs)
    echo
    exit 0
    ;;
esac

. /usr/share/initramfs-tools/hook-functions

mkdir -p $DESTDIR/systemdecrypt
copy_exec /lib/systemd/systemd-cryptsetup /lib/systemd
for i in /lib/x86_64-linux-gnu/libtss2*
do
    copy_exec ${i} /lib/x86_64-linux-gnu
done
if [ -f /lib/x86_64-linux-gnu/cryptsetup/libcryptsetup-token-systemd-tpm2.so ]; then
    mkdir -p $DESTDIR/lib/x86_64-linux-gnu/cryptsetup
    copy_exec /lib/x86_64-linux-gnu/cryptsetup/libcryptsetup-token-systemd-tpm2.so /lib/x86_64-linux-gnu/cryptsetup
fi
mkdir -p $DESTDIR/scripts/local-top
echo /scripts/local-top/systemdecrypt >> $DESTDIR/scripts/local-top/ORDER
if [ -f $DESTDIR/cryptroot/crypttab ]; then
    mv $DESTDIR/cryptroot/crypttab $DESTDIR/systemdecrypt/crypttab
fi

View File

@@ -22,6 +22,9 @@ import shutil
import greenlet
import pwd
import signal
import confluent.collective.manager as collective
import confluent.noderange as noderange
def fprint(txt):
sys.stdout.write(txt)
@@ -254,6 +257,9 @@ async def main():
uuid = rsp.get('id.uuid', {}).get('value', None)
if uuid:
uuidok = True
if 'collective.managercandidates' in rsp:
# Check if current node in candidates
pass
if 'deployment.useinsecureprotocols' in rsp:
insec = rsp.get('deployment.useinsecureprotocols', {}).get('value', None)
if insec != 'firmware':
@@ -272,8 +278,27 @@ async def main():
switch_value = rsp[key].get('value',None)
if switch_value and switch_value not in valid_nodes:
emprint(f'{switch_value} is not a valid node name (as referenced by attribute "{key}" of node {args.node}).')
print(f"Checking network configuration for {args.node}")
cfg = configmanager.ConfigManager(None)
cfd = cfg.get_node_attributes(
args.node, ('deployment.*', 'collective.managercandidates'))
profile = cfd.get(args.node, {}).get(
'deployment.pendingprofile', {}).get('value', None)
if not profile:
emprint(
f'{args.node} is not currently set to deploy any '
'profile, network boot attempts will be ignored')
candmgrs = cfd.get(args.node, {}).get(
'collective.managercandidates', {}).get('value', None)
if candmgrs:
try:
candmgrs = noderange.NodeRange(candmgrs, cfg).nodes
except Exception: # fallback to unverified noderange
candmgrs = noderange.NodeRange(candmgrs).nodes
if collective.get_myname() not in candmgrs:
emprint(f'{args.node} has deployment restricted to '
'certain collective managers excluding the '
'system running the selfcheck')
print(f"Checking network configuration for {args.node}")
bootablev4nics = []
bootablev6nics = []
targsships = []

View File

@@ -35,6 +35,8 @@ cd deb_dist/!(*.orig)/
if [ "$OPKGNAME" = "confluent-server" ]; then
if grep wheezy /etc/os-release; then
sed -i 's/^\(Depends:.*\)/\1, python-confluent-client, python-lxml, python-eficompressor, python-pycryptodomex, python-dateutil, python-pyopenssl, python-msgpack/' debian/control
elif grep jammy /etc/os-release; then
sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi, python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd, python3-netifaces, python3-yaml, python3-dateutil/' debian/control
else
sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi, python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd, python3-netifaces, python3-yaml, python3-dateutil, python3-pyasyncore/' debian/control
fi

View File

@@ -75,7 +75,7 @@ def get_certificate_paths():
continue
kploc = check_apache_config(os.path.join(currpath,
fname))
if keypath and kploc[0]:
if keypath and kploc[0] and keypath != kploc[0]:
return None, None # Ambiguous...
if kploc[0]:
keypath, certpath = kploc

View File

@@ -2657,7 +2657,7 @@ class ConfigManager(object):
dumpdata[confarea][element][attribute]['cryptvalue'] = '!'.join(cryptval)
elif isinstance(dumpdata[confarea][element][attribute], set):
dumpdata[confarea][element][attribute] = \
list(dumpdata[confarea][element][attribute])
confluent.util.natural_sort(list(dumpdata[confarea][element][attribute]))
return json.dumps(
dumpdata, sort_keys=True, indent=4, separators=(',', ': '))
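
A quick illustration of why natural ordering is applied before dumping set attributes (not from the commit; natural_sort_demo below is a stand-in for confluent.util.natural_sort, written only to show the intended ordering):

import re

def natural_sort_demo(vals):
    # split digit runs so numeric parts compare as numbers ('n2' before 'n10')
    return sorted(vals, key=lambda v: [int(t) if t.isdigit() else t
                                       for t in re.split(r'(\d+)', v)])

print(sorted(['n1', 'n10', 'n2']))             # plain sort: ['n1', 'n10', 'n2']
print(natural_sort_demo(['n1', 'n10', 'n2']))  # natural sort: ['n1', 'n2', 'n10']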

View File

@@ -75,6 +75,7 @@ import confluent.discovery.handlers.tsm as tsm
import confluent.discovery.handlers.pxe as pxeh
import confluent.discovery.handlers.smm as smm
import confluent.discovery.handlers.xcc as xcc
import confluent.discovery.handlers.xcc3 as xcc3
import confluent.discovery.handlers.megarac as megarac
import confluent.exceptions as exc
import confluent.log as log
@@ -114,6 +115,7 @@ nodehandlers = {
'service:lenovo-smm': smm,
'service:lenovo-smm2': smm,
'lenovo-xcc': xcc,
'lenovo-xcc3': xcc3,
'megarac-bmc': megarac,
'service:management-hardware.IBM:integrated-management-module2': imm,
'pxe-client': pxeh,
@@ -134,6 +136,7 @@ servicenames = {
'service:lenovo-smm2': 'lenovo-smm2',
'affluent-switch': 'affluent-switch',
'lenovo-xcc': 'lenovo-xcc',
'lenovo-xcc3': 'lenovo-xcc3',
'megarac-bmc': 'megarac-bmc',
#'openbmc': 'openbmc',
'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2',
@@ -150,6 +153,7 @@ servicebyname = {
'lenovo-smm2': 'service:lenovo-smm2',
'affluent-switch': 'affluent-switch',
'lenovo-xcc': 'lenovo-xcc',
'lenovo-xcc3': 'lenovo-xcc3',
'megarac-bmc': 'megarac-bmc',
'lenovo-imm2': 'service:management-hardware.IBM:integrated-management-module2',
'lenovo-switch': 'service:io-device.Lenovo:management-module',

View File

@@ -57,8 +57,28 @@ class NodeHandler(generic.NodeHandler):
self.csrftok = None
self.channel = None
self.atdefault = True
self._srvroot = None
self._mgrinfo = None
super(NodeHandler, self).__init__(info, configmanager)
def srvroot(self, wc):
if not self._srvroot:
srvroot, status = wc.grab_json_response_with_status('/redfish/v1/')
if status == 200:
self._srvroot = srvroot
return self._srvroot
def mgrinfo(self, wc):
if not self._mgrinfo:
mgrs = self.srvroot(wc)['Managers']['@odata.id']
rsp = wc.grab_json_response(mgrs)
if len(rsp['Members']) != 1:
raise Exception("Can not handle multiple Managers")
mgrurl = rsp['Members'][0]['@odata.id']
self._mgrinfo = wc.grab_json_response(mgrurl)
return self._mgrinfo
def get_firmware_default_account_info(self):
raise Exception('This must be subclassed')
@@ -75,11 +95,36 @@ class NodeHandler(generic.NodeHandler):
fprint = util.get_fingerprint(self.https_cert)
return util.cert_matches(fprint, certificate)
def enable_ipmi(self, wc):
npu = self.mgrinfo(wc).get(
'NetworkProtocol', {}).get('@odata.id', None)
if not npu:
raise Exception('Cannot enable IPMI, no NetworkProtocol on BMC')
npi = wc.grab_json_response(npu)
if not npi.get('IPMI', {}).get('ProtocolEnabled'):
wc.set_header('If-Match', '*')
wc.grab_json_response_with_status(
npu, {'IPMI': {'ProtocolEnabled': True}}, method='PATCH')
acctinfo = wc.grab_json_response_with_status(
self.target_account_url(wc))
acctinfo = acctinfo[0]
actypes = acctinfo['AccountTypes']
candidates = acctinfo['AccountTypes@Redfish.AllowableValues']
if 'IPMI' not in actypes and 'IPMI' in candidates:
actypes.append('IPMI')
acctupd = {
'AccountTypes': actypes,
'Password': self.currpass,
}
rsp = wc.grab_json_response_with_status(
self.target_account_url(wc), acctupd, method='PATCH')
def _get_wc(self):
defuser, defpass = self.get_firmware_default_account_info()
wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert)
wc.set_basic_credentials(defuser, defpass)
wc.set_header('Content-Type', 'application/json')
wc.set_header('Accept', 'application/json')
authmode = 0
if not self.trieddefault:
rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
@@ -114,7 +159,7 @@ class NodeHandler(generic.NodeHandler):
if status > 400:
self.trieddefault = True
if status == 401:
wc.set_basic_credentials(self.DEFAULT_USER, self.targpass)
wc.set_basic_credentials(defuser, self.targpass)
rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
if status == 200: # Default user still, but targpass
self.currpass = self.targpass
@@ -143,13 +188,33 @@ class NodeHandler(generic.NodeHandler):
self.currpass = self.targpass
return wc
def target_account_url(self, wc):
asrv = self.srvroot(wc).get('AccountService', {}).get('@odata.id')
rsp, status = wc.grab_json_response_with_status(asrv)
accts = rsp.get('Accounts', {}).get('@odata.id')
rsp, status = wc.grab_json_response_with_status(accts)
accts = rsp.get('Members', [])
for accturl in accts:
accturl = accturl.get('@odata.id', '')
if accturl:
rsp, status = wc.grab_json_response_with_status(accturl)
if rsp.get('UserName', None) == self.curruser:
targaccturl = accturl
break
else:
raise Exception("Unable to identify Account URL to modify on this BMC")
return targaccturl
def config(self, nodename):
mgrs = None
self.nodename = nodename
creds = self.configmanager.get_node_attributes(
nodename, ['secret.hardwaremanagementuser',
'secret.hardwaremanagementpassword',
'hardwaremanagement.manager', 'hardwaremanagement.method', 'console.method'],
True)
'hardwaremanagement.manager',
'hardwaremanagement.method',
'console.method'],
True)
cd = creds.get(nodename, {})
defuser, defpass = self.get_firmware_default_account_info()
user, passwd, _ = self.get_node_credentials(
@@ -159,7 +224,6 @@ class NodeHandler(generic.NodeHandler):
self.targuser = user
self.targpass = passwd
wc = self._get_wc()
srvroot, status = wc.grab_json_response_with_status('/redfish/v1/')
curruserinfo = {}
authupdate = {}
wc.set_header('Content-Type', 'application/json')
@@ -168,31 +232,23 @@ class NodeHandler(generic.NodeHandler):
if passwd != self.currpass:
authupdate['Password'] = passwd
if authupdate:
targaccturl = None
asrv = srvroot.get('AccountService', {}).get('@odata.id')
rsp, status = wc.grab_json_response_with_status(asrv)
accts = rsp.get('Accounts', {}).get('@odata.id')
rsp, status = wc.grab_json_response_with_status(accts)
accts = rsp.get('Members', [])
for accturl in accts:
accturl = accturl.get('@odata.id', '')
if accturl:
rsp, status = wc.grab_json_response_with_status(accturl)
if rsp.get('UserName', None) == self.curruser:
targaccturl = accturl
break
else:
raise Exception("Unable to identify Account URL to modify on this BMC")
targaccturl = self.target_account_url(wc)
rsp, status = wc.grab_json_response_with_status(targaccturl, authupdate, method='PATCH')
if status >= 300:
raise Exception("Failed attempting to update credentials on BMC")
self.curruser = user
self.currpass = passwd
wc.set_basic_credentials(user, passwd)
_, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
tries = 10
while tries and status >= 300:
tries -= 1
eventlet.sleep(1.0)
_, status = wc.grab_json_response_with_status('/redfish/v1/Managers')
_, status = wc.grab_json_response_with_status(
'/redfish/v1/Managers')
if (cd.get('hardwaremanagement.method', {}).get('value', 'ipmi') != 'redfish'
or cd.get('console.method', {}).get('value', None) == 'ipmi'):
self.enable_ipmi(wc)
if ('hardwaremanagement.manager' in cd and
cd['hardwaremanagement.manager']['value'] and
not cd['hardwaremanagement.manager']['value'].startswith(
@@ -203,14 +259,8 @@ class NodeHandler(generic.NodeHandler):
newip = newipinfo[-1][0]
if ':' in newip:
raise exc.NotImplementedException('IPv6 remote config TODO')
mgrs = srvroot['Managers']['@odata.id']
rsp = wc.grab_json_response(mgrs)
if len(rsp['Members']) != 1:
raise Exception("Can not handle multiple Managers")
mgrurl = rsp['Members'][0]['@odata.id']
mginfo = wc.grab_json_response(mgrurl)
hifurls = get_host_interface_urls(wc, mginfo)
mgtnicinfo = mginfo['EthernetInterfaces']['@odata.id']
hifurls = get_host_interface_urls(wc, self.mgrinfo(wc))
mgtnicinfo = self.mgrinfo(wc)['EthernetInterfaces']['@odata.id']
mgtnicinfo = wc.grab_json_response(mgtnicinfo)
mgtnics = [x['@odata.id'] for x in mgtnicinfo.get('Members', [])]
actualnics = []
@@ -236,7 +286,9 @@ class NodeHandler(generic.NodeHandler):
break
else:
wc.set_header('If-Match', '*')
rsp, status = wc.grab_json_response_with_status(actualnics[0], {'IPv4StaticAddresses': [newconfig]}, method='PATCH')
rsp, status = wc.grab_json_response_with_status(actualnics[0], {
'DHCPv4': {'DHCPEnabled': False},
'IPv4StaticAddresses': [newconfig]}, method='PATCH')
elif self.ipaddr.startswith('fe80::'):
self.configmanager.set_node_attributes(
{nodename: {'hardwaremanagement.manager': self.ipaddr}})

View File

@@ -0,0 +1,104 @@
# Copyright 2024 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import confluent.discovery.handlers.redfishbmc as redfishbmc
import eventlet.support.greendns
import confluent.util as util

webclient = eventlet.import_patched('pyghmi.util.webclient')

getaddrinfo = eventlet.support.greendns.getaddrinfo


class NodeHandler(redfishbmc.NodeHandler):
    devname = 'XCC'

    def get_firmware_default_account_info(self):
        return ('USERID', 'PASSW0RD')

    def scan(self):
        ip, port = self.get_web_port_and_ip()
        c = webclient.SecureHTTPConnection(ip, port,
                                           verifycallback=self.validate_cert)
        c.set_header('Accept', 'application/json')
        i = c.grab_json_response('/api/providers/logoninfo')
        modelname = i.get('items', [{}])[0].get('machine_name', None)
        if modelname:
            self.info['modelname'] = modelname
        for attrname in list(self.info.get('attributes', {})):
            val = self.info['attributes'][attrname]
            if '-uuid' == attrname[-5:] and len(val) == 32:
                val = val.lower()
                self.info['attributes'][attrname] = '-'.join([val[:8], val[8:12], val[12:16], val[16:20], val[20:]])
        attrs = self.info.get('attributes', {})
        room = attrs.get('room-id', None)
        if room:
            self.info['room'] = room
        rack = attrs.get('rack-id', None)
        if rack:
            self.info['rack'] = rack
        name = attrs.get('name', None)
        if name:
            self.info['hostname'] = name
        unumber = attrs.get('lowest-u', None)
        if unumber:
            self.info['u'] = unumber
        location = attrs.get('location', None)
        if location:
            self.info['location'] = location
        mtm = attrs.get('enclosure-machinetype-model', None)
        if mtm:
            self.info['modelnumber'] = mtm.strip()
        sn = attrs.get('enclosure-serial-number', None)
        if sn:
            self.info['serialnumber'] = sn.strip()
        if attrs.get('enclosure-form-factor', None) == 'dense-computing':
            encuuid = attrs.get('chassis-uuid', None)
            if encuuid:
                self.info['enclosure.uuid'] = fixuuid(encuuid)
            slot = int(attrs.get('slot', 0))
            if slot != 0:
                self.info['enclosure.bay'] = slot

    def validate_cert(self, certificate):
        fprint = util.get_fingerprint(self.https_cert)
        return util.cert_matches(fprint, certificate)


def remote_nodecfg(nodename, cfm):
    cfg = cfm.get_node_attributes(
        nodename, 'hardwaremanagement.manager')
    ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get(
        'value', None)
    ipaddr = ipaddr.split('/', 1)[0]
    ipaddr = getaddrinfo(ipaddr, 0)[0][-1]
    if not ipaddr:
        raise Exception('Cannot remote configure a system without known '
                        'address')
    info = {'addresses': [ipaddr]}
    nh = NodeHandler(info, cfm)
    nh.config(nodename)


if __name__ == '__main__':
    import confluent.config.configmanager as cfm
    c = cfm.ConfigManager(None)
    import sys
    info = {'addresses': [[sys.argv[1]]]}
    print(repr(info))
    testr = NodeHandler(info, c)
    testr.config(sys.argv[2])

View File

@@ -481,18 +481,23 @@ async def snoop(handler, protocol=None):
cloop = asyncio.get_running_loop()
cloop.add_reader(net, relay_packet, net, pktq)
cloop.add_reader(net4, relay_packet, net4, pktq)
newmacs = set([])
known_peers = set([])
peerbymacaddress = {}
deferpeers = []
while True:
try:
newmacs = set([])
newmacs.clear()
r, _, _ = select.select((net, net4), (), (), 60)
# clear known_peers and peerbymacaddress
# to avoid stale info getting in...
# rely upon the select(0.2) to catch rapid fire and aggregate ip
# addresses that come close together
# calling code needs to understand deeper context, as snoop
# will now yield dupe info over time
known_peers = set([])
peerbymacaddress = {}
deferpeers = []
known_peers.clear()
peerbymacaddress.clear()
deferpeers.clear()
timeo = 60
rdy = True
srp = await pktq.get()

View File

@@ -117,7 +117,7 @@ def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byeha
continue # MegaRAC redfish
elif value.endswith('/DeviceDescription.json'):
targurl = '/DeviceDescription.json'
targtype = 'megarac-bmc'
targtype = 'lenovo-xcc'
else:
return
if handler:
@@ -182,11 +182,14 @@ async def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
cloop.add_reader(net4, _relay_pkt, net4, pktq)
cloop.add_reader(net6, _relay_pkt, net6, pktq)
peerbymacaddress = {}
newmacs = set([])
deferrednotifies = []
machandlers = {}
while True:
try:
newmacs = set([])
deferrednotifies = []
machandlers = {}
newmacs.clear()
deferrednotifies.clear()
machandlers.clear()
timeout = None
srp = await pktq.get()
recent_peers = set([])
@@ -480,18 +483,25 @@ async def check_fish(urldata, port=443, verifycallback=None):
url, data, targtype = urldata
try:
wc = webclient.WebConnection(_get_svrip(data), port, verifycallback=verifycallback)
peerinfo = await wc.grab_json_response(url)
peerinfo = await wc.grab_json_response(url, headers={'Accept': 'application/json'})
except socket.error:
return None
if url == '/DeviceDescription.json':
if not peerinfo:
return None
try:
peerinfo = peerinfo[0]
except KeyError:
peerinfo['xcc-variant'] = '3'
except IndexError:
return None
try:
myuuid = peerinfo['node-uuid'].lower()
if '-' not in myuuid:
myuuid = '-'.join([myuuid[:8], myuuid[8:12], myuuid[12:16], myuuid[16:20], myuuid[20:]])
data['uuid'] = myuuid
data['attributes'] = peerinfo
data['services'] = ['lenovo-xcc']
data['services'] = ['lenovo-xcc'] if 'xcc-variant' not in peerinfo else ['lenovo-xcc' + peerinfo['xcc-variant']]
return data
except (IndexError, KeyError):
return None
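
A note on the exception-driven detection above: the code implies that the legacy XCC serves /DeviceDescription.json as a JSON array while the newer variant serves a JSON object, so indexing with 0 raises KeyError for the latter (and IndexError for an empty array). A small sketch with hypothetical payload shapes:

legacy_payload = [{'node-uuid': '0123456789abcdef0123456789abcdef'}]   # assumed array form
dictstyle_payload = {'node-uuid': '0123456789abcdef0123456789abcdef'}  # assumed object form

for peerinfo in (legacy_payload, dictstyle_payload, []):
    try:
        peerinfo = peerinfo[0]
    except KeyError:
        peerinfo['xcc-variant'] = '3'   # object response: flag as the xcc3 variant
    except IndexError:
        peerinfo = None                 # empty response: nothing usable
    print(peerinfo)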

View File

@@ -258,16 +258,20 @@ def setlimits():
def assure_ownership(path):
try:
if os.getuid() != os.stat(path).st_uid:
sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path))
if os.getuid() == 0:
sys.stderr.write('Attempting to run as root, when non-root usage is detected\n')
else:
sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path))
sys.exit(1)
except OSError as e:
if e.errno == 13:
sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path))
if os.getuid() == 0:
sys.stderr.write('Attempting to run as root, when non-root usage is detected\n')
else:
sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path))
sys.exit(1)
def sanity_check():
if os.getuid() == 0:
return True
assure_ownership('/etc/confluent')
assure_ownership('/etc/confluent/cfg')
for filename in glob.glob('/etc/confluent/cfg/*'):

View File

@@ -134,14 +134,19 @@ class TsmConsole(conapi.Console):
kv = util.TLSCertVerifier(
self.nodeconfig, self.node, 'pubkeys.tls_hardwaremanager').verify_cert
wc = webclient.SecureHTTPConnection(self.origbmc, 443, verifycallback=kv)
rsp = wc.grab_json_response_with_status('/login', {'data': [self.username.decode('utf8'), self.password.decode("utf8")]}, headers={'Content-Type': 'application/json'})
try:
rsp = wc.grab_json_response_with_status('/login', {'data': [self.username.decode('utf8'), self.password.decode("utf8")]}, headers={'Content-Type': 'application/json', 'Accept': 'application/json'})
except Exception as e:
raise cexc.TargetEndpointUnreachable(str(e))
if rsp[1] > 400:
raise cexc.TargetEndpointBadCredentials
bmc = self.bmc
if '%' in self.bmc:
prefix = self.bmc.split('%')[0]
bmc = prefix + ']'
self.ws = WrappedWebSocket(host=bmc)
self.ws.set_verify_callback(kv)
self.ws.connect('wss://{0}/console0'.format(self.bmc), host=bmc, cookie='XSRF-TOKEN={0}; SESSION={1}'.format(wc.cookies['XSRF-TOKEN'], wc.cookies['SESSION']))
self.ws.connect('wss://{0}/console0'.format(self.bmc), host=bmc, cookie='XSRF-TOKEN={0}; SESSION={1}'.format(wc.cookies['XSRF-TOKEN'], wc.cookies['SESSION']), subprotocols=[wc.cookies['XSRF-TOKEN']])
self.connected = True
eventlet.spawn_n(self.recvdata)
return

View File

@@ -522,6 +522,8 @@ class IpmiHandler(object):
return self.handle_sysconfig(True)
elif self.element[1:3] == ['system', 'clear']:
return self.handle_sysconfigclear()
elif self.element[1:3] == ['management_controller', 'clear']:
return self.handle_bmcconfigclear()
elif self.element[1:3] == ['management_controller', 'licenses']:
return self.handle_licenses()
elif self.element[1:3] == ['management_controller', 'save_licenses']:
@@ -1323,6 +1325,12 @@ class IpmiHandler(object):
self.ipmicmd.set_bmc_configuration(
self.inputdata.get_attributes(self.node))
def handle_bmcconfigclear(self):
if 'read' == self.op:
raise exc.InvalidArgumentException(
'Cannot read the "clear" resource')
self.ipmicmd.clear_bmc_configuration()
def handle_sysconfigclear(self):
if 'read' == self.op:
raise exc.InvalidArgumentException(

View File

@@ -390,6 +390,23 @@ async def _tlshandler(bind_host, bind_port):
else:
asyncio.create_task(_tlsstartup(cnn))
@ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)
def verify_stub(store, misc):
return 1
class PyObject_HEAD(ctypes.Structure):
_fields_ = [
("ob_refcnt", ctypes.c_ssize_t),
("ob_type", ctypes.c_void_p),
]
# see main/Modules/_ssl.c, only caring about the SSL_CTX pointer
class PySSLContext(ctypes.Structure):
_fields_ = [
("ob_base", PyObject_HEAD),
("ctx", ctypes.c_void_p),
]
@ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)
def verify_stub(store, misc):
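
For orientation, a hedged sketch of how a structure like PySSLContext is typically used together with the verify_stub defined above (not part of the commit; the library lookup and callback wiring are assumptions): it reaches the native SSL_CTX behind a Python ssl.SSLContext so a certificate verify callback can be installed through libssl with ctypes.

import ctypes
import ctypes.util
import ssl

libssl = ctypes.CDLL(ctypes.util.find_library('ssl'))  # assumed to resolve to OpenSSL's libssl
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# the C object backing ssl.SSLContext begins with PyObject_HEAD followed by SSL_CTX *ctx
native_ctx = PySSLContext.from_address(id(ctx)).ctx
# hand every presented certificate to verify_stub, which accepts unconditionally here
libssl.SSL_CTX_set_cert_verify_callback(ctypes.c_void_p(native_ctx), verify_stub, None)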

View File

@@ -14,13 +14,13 @@ Prefix: %{_prefix}
BuildArch: noarch
Requires: confluent_vtbufferd
%if "%{dist}" == ".el7"
Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic
Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic cpio
%else
%if "%{dist}" == ".el8"
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute cpio
%else
%if "%{dist}" == ".el9"
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute cpio
%else
Requires: python3-dbm,python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodome >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dnspython, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-PyYAML openssl iproute
%endif

View File

@@ -8,13 +8,13 @@ Source: confluent_imgutil.tar.xz
BuildArch: noarch
BuildRoot: /tmp/
%if "%{dist}" == ".el8"
Requires: squashfs-tools
Requires: squashfs-tools cpio
%else
%if "%{dist}" == ".el9"
Requires: squashfs-tools
Requires: squashfs-tools cpio
%else
%if "%{dist}" == ".el7"
Requires: squashfs-tools
Requires: squashfs-tools cpio
%else
Requires: squashfs
%endif

View File

@@ -5,4 +5,5 @@ Priority: optional
Maintainer: Jarrod Johnson <jjohnson2@lenovo.com>
Description: Web frontend for confluent server
Architecture: all
Depends: debootstrap

View File

@@ -661,11 +661,20 @@ class DebHandler(OsHandler):
run_constrainedx(fancy_chroot, (args, self.targpath))
args.cmd = ['apt-get', '-y', 'install'] + self.includepkgs
run_constrainedx(fancy_chroot, (args, self.targpath))
servicefile = os.path.join(self.targpath, 'usr/lib/systemd/system/ssh.service')
servicefile = os.path.join(
self.targpath, 'usr/lib/systemd/system/ssh.service')
if os.path.exists(servicefile):
os.symlink('/usr/lib/systemd/system/ssh.service', os.path.join(self.targpath, 'etc/systemd/system/multi-user.target.wants/ssh.service'))
targfile = os.path.join(
self.targpath,
'etc/systemd/system/multi-user.target.wants/ssh.service')
if not os.path.exists(targfile):
os.symlink('/usr/lib/systemd/system/ssh.service', targfile)
else:
os.symlink('/usr/lib/systemd/system/sshd.service', os.path.join(self.targpath, 'etc/systemd/system/multi-user.target.wants/sshd.service'))
targfile = os.path.join(
self.targpath,
'etc/systemd/system/multi-user.target.wants/sshd.service')
if not os.path.exists(targfile):
os.symlink('/usr/lib/systemd/system/sshd.service', targfile)
@@ -942,6 +951,8 @@ def fancy_chroot(args, installroot):
sourceresolv = '/etc/resolv.conf'
if os.path.islink(sourceresolv):
sourceresolv = os.readlink(sourceresolv)
# normalize and resolve relative and absolute paths
sourceresolv = os.path.normpath(os.path.join('/etc', sourceresolv))
dstresolv = os.path.join(installroot, 'etc/resolv.conf')
if os.path.islink(dstresolv):
dstresolv = os.path.join(installroot, os.readlink(dstresolv)[1:])
@@ -952,7 +963,6 @@ def fancy_chroot(args, installroot):
_mount('none', dstresolv, flags=MS_RDONLY|MS_REMOUNT|MS_BIND)
os.chroot(installroot)
os.chdir('/')
_mount('/', '/', flags=MS_BIND) # Make / manifest as a mounted filesystem in exec
os.environ['PS1'] = '[\x1b[1m\x1b[4mIMGUTIL EXEC {0}\x1b[0m \\W]$ '.format(imgname)
os.environ['CONFLUENT_IMGUTIL_MODE'] = 'exec'
if oshandler:
@@ -993,7 +1003,13 @@ def build_root_backend(optargs):
def _mount_constrained_fs(args, installroot):
# This is prepping for a chroot.
# For the target environment to be content with having a root
# filesystem, installroot must be a 'mount' entry of it's own,
# so bind mount to itself to satisfy
_mount(installroot, installroot, flags=MS_BIND)
_mount('/dev', os.path.join(installroot, 'dev'), flags=MS_BIND|MS_RDONLY)
_mount('/dev/pts', os.path.join(installroot, 'dev/pts'), flags=MS_BIND|MS_RDONLY)
_mount('proc', os.path.join(installroot, 'proc'), fstype='proc')
_mount('sys', os.path.join(installroot, 'sys'), fstype='sysfs')
_mount('runfs', os.path.join(installroot, 'run'), fstype='tmpfs')