From ede941c0d91d5bd00229f1b28998097e9881530d Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 22 Jul 2024 13:46:27 -0400 Subject: [PATCH 01/51] Add deb packaging of imgutil --- imgutil/builddeb | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100755 imgutil/builddeb diff --git a/imgutil/builddeb b/imgutil/builddeb new file mode 100755 index 00000000..4258fbc4 --- /dev/null +++ b/imgutil/builddeb @@ -0,0 +1,24 @@ +#!/bin/bash +VERSION=`git describe|cut -d- -f 1` +NUMCOMMITS=`git describe|cut -d- -f 2` +if [ "$NUMCOMMITS" != "$VERSION" ]; then + VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3` +fi +mkdir -p /tmp/confluent-imgutil +cp -a * /tmp/confluent-imgutil +cp ../LICENSE /tmp/confluent-imgutil +cd /tmp/confluent-imgutil +rm -rf deb/confluent_imgutil_$VERSION/ +mkdir -p deb/confluent_imgutil_$VERSION/DEBIAN/ +mkdir -p deb/confluent_imgutil_$VERSION/opt/confluent/lib/imgutil +mkdir -p deb/confluent_imgutil_$VERSION/opt/confluent/bin +mv imgutil deb/confluent_imgutil_$VERSION/opt/confluent/bin/ +chmod a+x deb/confluent_imgutil_$VERSION/opt/confluent/bin/imgutil +mv ubuntu* suse15 el7 el9 el8 deb/confluent_imgutil_$VERSION/opt/confluent/lib/imgutil/ +mkdir -p deb/confluent_imgutil_$VERSION/opt/confluent/share/licenses/confluent_imgutil +cp LICENSE deb/confluent_imgutil_$VERSION/opt/confluent/share/licenses/confluent_imgutil +sed -e 's/#VERSION#/'$VERSION/ control.tmpl > deb/confluent_imgutil_$VERSION/DEBIAN/control +dpkg-deb --build deb/lenovo_confluent_$VERSION +if [ ! -z "$1" ]; then + mv deb/lenovo-confluent_$VERSION.deb $1 +fi From 36ca68f44dba1bb028fc32a0aa4f17e8c9154a0a Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 22 Jul 2024 13:46:45 -0400 Subject: [PATCH 02/51] Add control file for deb build of imgutil --- imgutil/control.tmpl | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 imgutil/control.tmpl diff --git a/imgutil/control.tmpl b/imgutil/control.tmpl new file mode 100644 index 00000000..a0fe21af --- /dev/null +++ b/imgutil/control.tmpl @@ -0,0 +1,8 @@ +Package: confluent-imgutil +Version: #VERSION# +Section: base +Priority: optional +Maintainer: Jarrod Johnson +Description: Web frontend for confluent server +Architecture: all + From 2235faa76dea31f1a455c1f2208bdd2e086b8be4 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 24 Jul 2024 08:33:20 -0400 Subject: [PATCH 03/51] Stop using private interface of PyCA PyCA changes their minds about which bindings to include. So make the binding ourselves since PyCA removed it in certain versions. This is a backport of the implementation from the async port effort. 
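For reference, the replacement binding relies on a general CPython property: id() returns an object's memory address, so a ctypes.Structure that mirrors the object's C layout can read its fields directly, with no cffi helper at all. A minimal standalone sketch of that trick, using a float object purely as an illustration (standard CPython builds only; this is not part of the change itself):

    import ctypes

    class PyObject_HEAD(ctypes.Structure):
        _fields_ = [
            ("ob_refcnt", ctypes.c_ssize_t),
            ("ob_type", ctypes.c_void_p),
        ]

    class PyFloatObject(ctypes.Structure):
        # CPython stores a float's value immediately after the object header
        _fields_ = [
            ("ob_base", PyObject_HEAD),
            ("ob_fval", ctypes.c_double),
        ]

    value = 42.5
    # from_address() overlays the structure on the live object at id(value)
    print(PyFloatObject.from_address(id(value)).ob_fval)  # 42.5

The change below uses the same pattern to recover the raw SSL_CTX pointer so that SSL_CTX_set_cert_verify_callback can be invoked through a plain ctypes handle on libssl.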
--- confluent_server/confluent/sockapi.py | 31 ++++++++++++++++++++------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/confluent_server/confluent/sockapi.py b/confluent_server/confluent/sockapi.py index 2d4db15b..8aca0058 100644 --- a/confluent_server/confluent/sockapi.py +++ b/confluent_server/confluent/sockapi.py @@ -70,15 +70,17 @@ try: # so we need to ffi that in using a strategy compatible with PyOpenSSL import OpenSSL.SSL as libssln import OpenSSL.crypto as crypto - from OpenSSL._util import ffi except ImportError: libssl = None - ffi = None crypto = None plainsocket = None libc = ctypes.CDLL(ctypes.util.find_library('c')) +libsslc = ctypes.CDLL(ctypes.util.find_library('ssl')) +libsslc.SSL_CTX_set_cert_verify_callback.argtypes = [ + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + def _should_authlog(path, operation): if (operation == 'retrieve' and @@ -389,11 +391,24 @@ def _tlshandler(bind_host, bind_port): else: eventlet.spawn_n(_tlsstartup, cnn) +@ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p) +def verify_stub(store, misc): + return 1 + +class PyObject_HEAD(ctypes.Structure): + _fields_ = [ + ("ob_refcnt", ctypes.c_ssize_t), + ("ob_type", ctypes.c_void_p), + ] + + +# see main/Modules/_ssl.c, only caring about the SSL_CTX pointer +class PySSLContext(ctypes.Structure): + _fields_ = [ + ("ob_base", PyObject_HEAD), + ("ctx", ctypes.c_void_p), + ] -if ffi: - @ffi.callback("int(*)( X509_STORE_CTX *, void*)") - def verify_stub(store, misc): - return 1 def _tlsstartup(cnn): @@ -416,8 +431,8 @@ def _tlsstartup(cnn): ctx.use_certificate_file('/etc/confluent/srvcert.pem') ctx.use_privatekey_file('/etc/confluent/privkey.pem') ctx.set_verify(libssln.VERIFY_PEER, lambda *args: True) - libssln._lib.SSL_CTX_set_cert_verify_callback(ctx._context, - verify_stub, ffi.NULL) + ssl_ctx = PySSLContext.from_address(id(ctx)).ctx + libsslc.SSL_CTX_set_cert_verify_callback(ssl_ctx, verify_stub, 0) cnn = libssl.Connection(ctx, cnn) cnn.set_accept_state() cnn.do_handshake() From c91af840e510ccbed54a858a97623792398aa1ee Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 24 Jul 2024 11:12:31 -0400 Subject: [PATCH 04/51] Robust handling of relative link resolv.conf resolv.conf may be a relative link, normal file, or absolute link. Handle all cases. 
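The two added lines lean on os.path semantics: os.path.join('/etc', target) returns target unchanged when readlink() yields an absolute path, and os.path.normpath() collapses any '..' components when it yields a relative one. A minimal sketch of the resulting logic (the example symlink target below is illustrative, e.g. systemd-resolved's stub file):

    import os

    def resolve_resolvconf(path='/etc/resolv.conf'):
        if os.path.islink(path):
            # e.g. '../run/systemd/resolve/stub-resolv.conf' or '/run/...'
            target = os.readlink(path)
            # absolute targets pass through join() untouched; relative ones
            # are anchored at /etc and then normalized
            path = os.path.normpath(os.path.join('/etc', target))
        return path

As in the patch, only a single level of link is resolved; following a chain of symlinks would call for os.path.realpath() instead.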
--- imgutil/imgutil | 2 ++ 1 file changed, 2 insertions(+) diff --git a/imgutil/imgutil b/imgutil/imgutil index 907a3b64..bc34af01 100644 --- a/imgutil/imgutil +++ b/imgutil/imgutil @@ -942,6 +942,8 @@ def fancy_chroot(args, installroot): sourceresolv = '/etc/resolv.conf' if os.path.islink(sourceresolv): sourceresolv = os.readlink(sourceresolv) + # normalize and resolve relative and absolute paths + sourceresolv = os.path.normpath(os.path.join('/etc', sourceresolv)) dstresolv = os.path.join(installroot, 'etc/resolv.conf') if os.path.islink(dstresolv): dstresolv = os.path.join(installroot, os.readlink(dstresolv)[1:]) From 714fefe31bbff6d5e3fee989707554dc59daa2a7 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 24 Jul 2024 14:41:39 -0400 Subject: [PATCH 05/51] Fix unethered boot for ubuntu --- .../ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh index f1b8e45a..ff8d253d 100644 --- a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh +++ b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh @@ -8,7 +8,7 @@ for addr in $(grep ^MANAGER: /etc/confluent/confluent.info|awk '{print $2}'|sed fi done mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay -if grep confluennt_imagemethtod=untethered /proc/cmdline > /dev/null; then +if grep confluennt_imagemethod=untethered /proc/cmdline > /dev/null; then mount -t tmpfs untethered /mnt/remoteimg curl https://$confluent_mgr/confluent-public/os/$confluent_profile/rootimg.sfs -o /mnt/remoteimg/rootimg.sfs else From a92edc7924fbc6f593bf05e07cb7d8d38d155050 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 24 Jul 2024 15:20:02 -0400 Subject: [PATCH 06/51] Apply ownership sanity check even for root User could accidently run 'confluent' in a way that makes no sense, block it the most accessible way. The pid file should have blocked it, but systemd purges the directory even on failure. 
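The shape of the check, reduced to its essentials (a condensed sketch, not the exact messages used below): compare the invoking UID against the owner of the path, and treat a permission error while stat()ing it the same as a mismatch.

    import os
    import sys

    def assure_ownership(path):
        try:
            owned = os.stat(path).st_uid == os.getuid()
        except PermissionError:  # errno 13: cannot even read it, so not ours
            owned = False
        if not owned:
            sys.stderr.write('{} is not owned by the invoking user\n'.format(path))
            sys.exit(1)

The actual change additionally distinguishes the root case, so the error message points at the accidental root invocation rather than at file ownership.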
--- confluent_server/confluent/main.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/confluent_server/confluent/main.py b/confluent_server/confluent/main.py index b49d8f56..9fb27972 100644 --- a/confluent_server/confluent/main.py +++ b/confluent_server/confluent/main.py @@ -220,16 +220,20 @@ def setlimits(): def assure_ownership(path): try: if os.getuid() != os.stat(path).st_uid: - sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path)) + if os.getuid() == 0: + sys.stderr.write('Attempting to run as root, when non-root usage is detected\n') + else: + sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path)) sys.exit(1) except OSError as e: if e.errno == 13: - sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path)) + if os.getuid() == 0: + sys.stderr.write('Attempting to run as root, when non-root usage is detected\n') + else: + sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path)) sys.exit(1) def sanity_check(): - if os.getuid() == 0: - return True assure_ownership('/etc/confluent') assure_ownership('/etc/confluent/cfg') for filename in glob.glob('/etc/confluent/cfg/*'): From 6e8d8dabd11e5821d8c34a38803e631c58792c58 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 24 Jul 2024 15:28:03 -0400 Subject: [PATCH 07/51] Fix whitespace issue --- confluent_server/confluent/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_server/confluent/main.py b/confluent_server/confluent/main.py index 9fb27972..b0e3508a 100644 --- a/confluent_server/confluent/main.py +++ b/confluent_server/confluent/main.py @@ -227,7 +227,7 @@ def assure_ownership(path): sys.exit(1) except OSError as e: if e.errno == 13: - if os.getuid() == 0: + if os.getuid() == 0: sys.stderr.write('Attempting to run as root, when non-root usage is detected\n') else: sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path)) From 8f1a1130a8b9348aa682925ec9cda1871238bb70 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 24 Jul 2024 15:55:04 -0400 Subject: [PATCH 08/51] Add a selfcheck to check misdone collective manager --- confluent_server/bin/confluent_selfcheck | 27 +++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/confluent_server/bin/confluent_selfcheck b/confluent_server/bin/confluent_selfcheck index b9651d17..64794ae4 100755 --- a/confluent_server/bin/confluent_selfcheck +++ b/confluent_server/bin/confluent_selfcheck @@ -24,6 +24,9 @@ import eventlet import greenlet import pwd import signal +import confluent.collective.manager as collective +import confluent.noderange as noderange + def fprint(txt): sys.stdout.write(txt) @@ -258,6 +261,9 @@ if __name__ == '__main__': uuid = rsp.get('id.uuid', {}).get('value', None) if uuid: uuidok = True + if 'collective.managercandidates' in rsp: + # Check if current node in candidates + pass if 'deployment.useinsecureprotocols' in rsp: insec = rsp.get('deployment.useinsecureprotocols', {}).get('value', None) if insec != 'firmware': @@ -276,8 +282,27 @@ if __name__ == '__main__': switch_value = rsp[key].get('value',None) if switch_value and switch_value not in valid_nodes: emprint(f'{switch_value} is not a valid node name (as referenced by attribute "{key}" of node {args.node}).') - print(f"Checking network configuration for {args.node}") cfg = configmanager.ConfigManager(None) + cfd = cfg.get_node_attributes( + args.node, 
('deployment.*', 'collective.managercandidates')) + profile = cfd.get(args.node, {}).get( + 'deployment.pendingprofile', {}).get('value', None) + if not profile: + emprint( + f'{args.node} is not currently set to deploy any ' + 'profile, network boot attempts will be ignored') + candmgrs = cfd.get(args.node, {}).get( + 'collective.managercandidates', {}).get('value', None) + if candmgrs: + try: + candmgrs = noderange.NodeRange(candmgrs, cfg).nodes + except Exception: # fallback to unverified noderange + candmgrs = noderange.NodeRange(candmgrs).nodes + if collective.get_myname() not in candmgrs: + emprint(f'{args.node} has deployment restricted to ' + 'certain collective managers excluding the ' + 'system running the selfcheck') + print(f"Checking network configuration for {args.node}") bootablev4nics = [] bootablev6nics = [] targsships = [] From c3e918fc5fa3dbc7c9bf6d4b247b657b72279c05 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Thu, 25 Jul 2024 09:42:24 -0400 Subject: [PATCH 09/51] Fix mistake in untethered support --- .../ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh index ff8d253d..0db99754 100644 --- a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh +++ b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh @@ -8,7 +8,7 @@ for addr in $(grep ^MANAGER: /etc/confluent/confluent.info|awk '{print $2}'|sed fi done mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay -if grep confluennt_imagemethod=untethered /proc/cmdline > /dev/null; then +if grep confluent_imagemethod=untethered /proc/cmdline > /dev/null; then mount -t tmpfs untethered /mnt/remoteimg curl https://$confluent_mgr/confluent-public/os/$confluent_profile/rootimg.sfs -o /mnt/remoteimg/rootimg.sfs else From 0f955cd068ca67182d668a693398bdace02d4b9b Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Thu, 25 Jul 2024 11:24:41 -0400 Subject: [PATCH 10/51] Begin work on a cryptboot support for ubuntu Start implementing a tpm2-initramfs-tool based approach. This requires a bit of an odd transition as the PCR 7 is likely to change between the install phase and the boot phase, so we have to select different PCRs, but that requires an argument to pass that crypttab does not support. 
--- .../profiles/default/autoinstall/user-data | 1 + .../profiles/default/scripts/post.sh | 26 ++++++++++++++++++- .../profiles/default/scripts/pre.sh | 12 +++++---- 3 files changed, 33 insertions(+), 6 deletions(-) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/autoinstall/user-data b/confluent_osdeploy/ubuntu22.04/profiles/default/autoinstall/user-data index 5b6c9894..7c4181d4 100644 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/autoinstall/user-data +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/autoinstall/user-data @@ -10,6 +10,7 @@ autoinstall: storage: layout: name: lvm +#CRYPTBOOT password: %%CRYPTPASS%% match: path: "%%INSTALLDISK%%" user-data: diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh index d9730889..69e1593e 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh @@ -60,10 +60,12 @@ cp /custom-installation/confluent/bin/apiclient /target/opt/confluent/bin mount -o bind /dev /target/dev mount -o bind /proc /target/proc mount -o bind /sys /target/sys +mount -o bind /run /target/run mount -o bind /sys/firmware/efi/efivars /target/sys/firmware/efi/efivars if [ 1 = $updategrub ]; then chroot /target update-grub fi + echo "Port 22" >> /etc/ssh/sshd_config echo "Port 2222" >> /etc/ssh/sshd_config echo "Match LocalPort 22" >> /etc/ssh/sshd_config @@ -88,8 +90,30 @@ chroot /target bash -c "source /etc/confluent/functions; run_remote_parts post.d source /target/etc/confluent/functions run_remote_config post + +if [ -f /etc/confluent_lukspass ]; then + $lukspass=$(cat /etc/confluent_lukspass) + chroot /target apt install tpm2-initramfs-tool + chroot /target tpm2-initramfs-tool seal --data "$(lukspass)" > /dev/null + # The default PCR 7 mutates, and crypttab does not provide a way to pass args + cat > /target/usr/bin/tpm2-initramfs-tool.pcr0 << EOF +#!/bin/sh +tpm2-initramfs-tool -p 0 \$* +EOF + chmod 755 /target/usr/bin/tpm2-initramfs-tool.pcr0 + cat > /target/etc/initramfs-tools/hooks/tpm2-initramfs-tool < /dev/console diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh index 5db222a7..ee61ac26 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh @@ -13,11 +13,6 @@ exec 2>> /var/log/confluent/confluent-pre.log chmod 600 /var/log/confluent/confluent-pre.log cryptboot=$(grep encryptboot: $deploycfg|sed -e 's/^encryptboot: //') -if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then - echo "****Encrypted boot requested, but not implemented for this OS, halting install" > /dev/console - [ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but not implemented for this OS,halting install" >> $(cat /tmp/autoconsdev)) - while :; do sleep 86400; done -fi cat /custom-installation/ssh/*pubkey > /root/.ssh/authorized_keys @@ -45,6 +40,13 @@ if [ ! -e /tmp/installdisk ]; then python3 /custom-installation/getinstalldisk fi sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml +if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then + lukspass=$(head -c 64 < /dev/urandom |base64) + sed -i s!%%CRYPTPASS%%!$lukspass! /autoinstall.yaml + sed -i s!'#CRYPTBOOT'!! 
/autoinstall.yaml + echo $lukspass > /etc/confluent_lukspass + +fi ) & tail --pid $! -n 0 -F /var/log/confluent/confluent-pre.log > /dev/console From 41b722c3f7d583381008fb86968390033225cba1 Mon Sep 17 00:00:00 2001 From: Markus Hilger Date: Thu, 25 Jul 2024 18:38:23 +0200 Subject: [PATCH 11/51] Use natural sort for lists in json dumps Previously, items were randomly arranged in lists in the json dump. This meant that the JSON files were different after each export. Now they are naturally sorted and identical. This should make it easier to save and compare the JSON dumps in version control systems. --- confluent_server/confluent/config/configmanager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_server/confluent/config/configmanager.py b/confluent_server/confluent/config/configmanager.py index 528924e8..6cbf4604 100644 --- a/confluent_server/confluent/config/configmanager.py +++ b/confluent_server/confluent/config/configmanager.py @@ -2647,7 +2647,7 @@ class ConfigManager(object): dumpdata[confarea][element][attribute]['cryptvalue'] = '!'.join(cryptval) elif isinstance(dumpdata[confarea][element][attribute], set): dumpdata[confarea][element][attribute] = \ - list(dumpdata[confarea][element][attribute]) + confluent.util.natural_sort(list(dumpdata[confarea][element][attribute])) return json.dumps( dumpdata, sort_keys=True, indent=4, separators=(',', ': ')) From 80296b6cbc8c9c2b03b25f03078fce0e2d66d5c6 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Thu, 25 Jul 2024 14:05:10 -0400 Subject: [PATCH 12/51] Point to the C context object rather than python class The OpenSSL variant of Context is a python class, but it does have a C context in it. --- confluent_server/confluent/sockapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_server/confluent/sockapi.py b/confluent_server/confluent/sockapi.py index 8aca0058..86534767 100644 --- a/confluent_server/confluent/sockapi.py +++ b/confluent_server/confluent/sockapi.py @@ -431,7 +431,7 @@ def _tlsstartup(cnn): ctx.use_certificate_file('/etc/confluent/srvcert.pem') ctx.use_privatekey_file('/etc/confluent/privkey.pem') ctx.set_verify(libssln.VERIFY_PEER, lambda *args: True) - ssl_ctx = PySSLContext.from_address(id(ctx)).ctx + ssl_ctx = PySSLContext.from_address(id(ctx._context)).ctx libsslc.SSL_CTX_set_cert_verify_callback(ssl_ctx, verify_stub, 0) cnn = libssl.Connection(ctx, cnn) cnn.set_accept_state() From 298be3b30a385af3c2506ba2737dbb530ac38e1d Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Thu, 25 Jul 2024 14:05:10 -0400 Subject: [PATCH 13/51] Point to the C context object rather than python class The OpenSSL variant of Context is a python class, but it does have a C context in it. 
--- confluent_server/confluent/sockapi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_server/confluent/sockapi.py b/confluent_server/confluent/sockapi.py index 8aca0058..86534767 100644 --- a/confluent_server/confluent/sockapi.py +++ b/confluent_server/confluent/sockapi.py @@ -431,7 +431,7 @@ def _tlsstartup(cnn): ctx.use_certificate_file('/etc/confluent/srvcert.pem') ctx.use_privatekey_file('/etc/confluent/privkey.pem') ctx.set_verify(libssln.VERIFY_PEER, lambda *args: True) - ssl_ctx = PySSLContext.from_address(id(ctx)).ctx + ssl_ctx = PySSLContext.from_address(id(ctx._context)).ctx libsslc.SSL_CTX_set_cert_verify_callback(ssl_ctx, verify_stub, 0) cnn = libssl.Connection(ctx, cnn) cnn.set_accept_state() From 30aa6f382c47c0a5b04269f96daa20690ae296b3 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Thu, 25 Jul 2024 14:54:15 -0400 Subject: [PATCH 14/51] Ignore duplicate specifications of same key Particularly if traversing a lot of linked configuration, the same key/cert path may come up multiple times, check for equality and if equal, just keep going. --- confluent_server/confluent/certutil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_server/confluent/certutil.py b/confluent_server/confluent/certutil.py index 9a478787..4ac67165 100644 --- a/confluent_server/confluent/certutil.py +++ b/confluent_server/confluent/certutil.py @@ -76,7 +76,7 @@ def get_certificate_paths(): continue kploc = check_apache_config(os.path.join(currpath, fname)) - if keypath and kploc[0]: + if keypath and kploc[0] and keypath != kploc[0]: return None, None # Ambiguous... if kploc[0]: keypath, certpath = kploc From 626f16cb6fcac5a7c9531014766b287ac9ca2d72 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Thu, 25 Jul 2024 14:54:15 -0400 Subject: [PATCH 15/51] Ignore duplicate specifications of same key Particularly if traversing a lot of linked configuration, the same key/cert path may come up multiple times, check for equality and if equal, just keep going. --- confluent_server/confluent/certutil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_server/confluent/certutil.py b/confluent_server/confluent/certutil.py index 9a478787..4ac67165 100644 --- a/confluent_server/confluent/certutil.py +++ b/confluent_server/confluent/certutil.py @@ -76,7 +76,7 @@ def get_certificate_paths(): continue kploc = check_apache_config(os.path.join(currpath, fname)) - if keypath and kploc[0]: + if keypath and kploc[0] and keypath != kploc[0]: return None, None # Ambiguous... 
if kploc[0]: keypath, certpath = kploc From 956e473fa6ea6fe3af1c61d32f02090ab6b59857 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Thu, 25 Jul 2024 15:25:09 -0400 Subject: [PATCH 16/51] Have SSDP fallback to unverified noderanges when looking at candidates --- confluent_server/confluent/discovery/protocols/ssdp.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/confluent_server/confluent/discovery/protocols/ssdp.py b/confluent_server/confluent/discovery/protocols/ssdp.py index 3c1edc74..12ec4ba7 100644 --- a/confluent_server/confluent/discovery/protocols/ssdp.py +++ b/confluent_server/confluent/discovery/protocols/ssdp.py @@ -251,7 +251,10 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None): break candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None) if candmgrs: - candmgrs = noderange.NodeRange(candmgrs, cfg).nodes + try: + candmgrs = noderange.NodeRange(candmgrs, cfg).nodes + except Exception: + candmgrs = noderange.NodeRange(candmgrs).nodes if collective.get_myname() not in candmgrs: break currtime = time.time() From dc7c9f4a3d324c8881fd312d8132ed8207f64e15 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Thu, 25 Jul 2024 15:25:09 -0400 Subject: [PATCH 17/51] Have SSDP fallback to unverified noderanges when looking at candidates --- confluent_server/confluent/discovery/protocols/ssdp.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/confluent_server/confluent/discovery/protocols/ssdp.py b/confluent_server/confluent/discovery/protocols/ssdp.py index 3c1edc74..12ec4ba7 100644 --- a/confluent_server/confluent/discovery/protocols/ssdp.py +++ b/confluent_server/confluent/discovery/protocols/ssdp.py @@ -251,7 +251,10 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None): break candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None) if candmgrs: - candmgrs = noderange.NodeRange(candmgrs, cfg).nodes + try: + candmgrs = noderange.NodeRange(candmgrs, cfg).nodes + except Exception: + candmgrs = noderange.NodeRange(candmgrs).nodes if collective.get_myname() not in candmgrs: break currtime = time.time() From 1d6009a2f2d58211e031e8290d3367a2937422bb Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 10:33:38 -0400 Subject: [PATCH 18/51] Switch to using systemd-cryptenroll The design more cleanly uses luks slot, but requires providing initramfs hooks. Those hooks are provided now. 
--- .../profiles/default/scripts/post.sh | 73 +++++++++++++++---- .../profiles/default/scripts/pre.sh | 9 ++- 2 files changed, 65 insertions(+), 17 deletions(-) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh index 69e1593e..2c8be0c0 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh @@ -92,23 +92,66 @@ source /target/etc/confluent/functions run_remote_config post if [ -f /etc/confluent_lukspass ]; then - $lukspass=$(cat /etc/confluent_lukspass) - chroot /target apt install tpm2-initramfs-tool - chroot /target tpm2-initramfs-tool seal --data "$(lukspass)" > /dev/null - # The default PCR 7 mutates, and crypttab does not provide a way to pass args - cat > /target/usr/bin/tpm2-initramfs-tool.pcr0 << EOF -#!/bin/sh -tpm2-initramfs-tool -p 0 \$* -EOF - chmod 755 /target/usr/bin/tpm2-initramfs-tool.pcr0 - cat > /target/etc/initramfs-tools/hooks/tpm2-initramfs-tool </target/etc/initramfs-tools/scripts/local-top/systemdecrypt << EOS +#!/bin/sh +case \$1 in +prereqs) + echo + exit 0 + ;; +esac + +systemdecryptnow() { +. /usr/lib/cryptsetup/functions +local CRYPTTAB_SOURCE=\$(awk '{print \$2}' /systemdecrypt/crypttab) +local CRYPTTAB_NAME=\$(awk '{print \$1}' /systemdecrypt/crypttab) +crypttab_resolve_source +/lib/systemd/systemd-cryptsetup attach "\${CRYPTTAB_NAME}" "\${CRYPTTAB_SOURCE}" none tpm2-device=auto +} + +systemdecryptnow +EOS + chmod 755 /target/etc/initramfs-tools/scripts/local-top/systemdecrypt + cat > /target/etc/initramfs-tools/hooks/systemdecrypt <> \$DESTDIR/scripts/local-top/ORDER + +if [ -f \$DESTDIR/cryptroot/crypttab ]; then + mv \$DESTDIR/cryptroot/crypttab \$DESTDIR/systemdecrypt/crypttab +fi EOF - chmod 755 /target/etc/initramfs-tools/hooks/tpm2-initramfs-tool chroot /target update-initramfs -u fi python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: staged' diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh index ee61ac26..bfe1c7db 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh @@ -41,10 +41,15 @@ if [ ! -e /tmp/installdisk ]; then fi sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then - lukspass=$(head -c 64 < /dev/urandom |base64) + if ! grep '#CRYPTBOOT' /autoinstall.yaml > /dev/null; then + echo "****Encrypted boot requested, but the user-data does not have a hook to enable,halting install" > /dev/console + [ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but the user-data does not have a hook to enable,halting install" >> $(cat /tmp/autoconsdev)) + while :; do sleep 86400; done + fi + lukspass=$(head -c 66 < /dev/urandom |base64 -w0) sed -i s!%%CRYPTPASS%%!$lukspass! /autoinstall.yaml sed -i s!'#CRYPTBOOT'!! /autoinstall.yaml - echo $lukspass > /etc/confluent_lukspass + echo -n $lukspass > /etc/confluent_lukspass fi ) & From 58ee85f39ebe558f11108942f18c8966e2cae896 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 11:33:01 -0400 Subject: [PATCH 19/51] Rework Ubuntu addcrypt support The comment based hook is destroyed during early install process. Use python to manipulate the autoinstall file in a more sophisticated way. 
Also refactor the initramfs hook material to be standalone files. --- .../profiles/default/autoinstall/user-data | 1 - .../profiles/default/scripts/addcrypt | 12 +++++ .../profiles/default/scripts/post.sh | 49 ++----------------- .../profiles/default/scripts/pre.sh | 3 +- .../profiles/default/scripts/systemdecrypt | 17 +++++++ .../default/scripts/systemdecrypt-hook | 22 +++++++++ 6 files changed, 58 insertions(+), 46 deletions(-) create mode 100644 confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt create mode 100644 confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt create mode 100644 confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/autoinstall/user-data b/confluent_osdeploy/ubuntu22.04/profiles/default/autoinstall/user-data index 7c4181d4..5b6c9894 100644 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/autoinstall/user-data +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/autoinstall/user-data @@ -10,7 +10,6 @@ autoinstall: storage: layout: name: lvm -#CRYPTBOOT password: %%CRYPTPASS%% match: path: "%%INSTALLDISK%%" user-data: diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt new file mode 100644 index 00000000..4f2ae905 --- /dev/null +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt @@ -0,0 +1,12 @@ +import yaml +import sys + +ainst = {} +with open('/autoinstall.yaml', 'r') as allin: + ainst = yaml.safe_load(allin) + +ainst['storage']['layout']['password'] = sys.argv[1] + +with open('/autoinstall.yaml', 'w') as allout: + yaml.safe_dump(ainst, allout) + diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh index 2c8be0c0..998f7bda 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh @@ -108,50 +108,11 @@ if [ -f /etc/confluent_lukspass ]; then $lukspass=$(cat /etc/confluent_lukspass) chroot /target apt install libtss2-rc0 PASSWORD=$(lukspass) chroot /target systemd-cryptenroll --tpm2-device=auto $CRYPTTAB_SOURCE - cat >/target/etc/initramfs-tools/scripts/local-top/systemdecrypt << EOS -#!/bin/sh -case \$1 in -prereqs) - echo - exit 0 - ;; -esac - -systemdecryptnow() { -. 
/usr/lib/cryptsetup/functions -local CRYPTTAB_SOURCE=\$(awk '{print \$2}' /systemdecrypt/crypttab) -local CRYPTTAB_NAME=\$(awk '{print \$1}' /systemdecrypt/crypttab) -crypttab_resolve_source -/lib/systemd/systemd-cryptsetup attach "\${CRYPTTAB_NAME}" "\${CRYPTTAB_SOURCE}" none tpm2-device=auto -} - -systemdecryptnow -EOS - chmod 755 /target/etc/initramfs-tools/scripts/local-top/systemdecrypt - cat > /target/etc/initramfs-tools/hooks/systemdecrypt <> \$DESTDIR/scripts/local-top/ORDER - -if [ -f \$DESTDIR/cryptroot/crypttab ]; then - mv \$DESTDIR/cryptroot/crypttab \$DESTDIR/systemdecrypt/crypttab -fi -EOF + fetch_remote systemdecrypt + mv systemdecrypt /target/etc/initramfs-tools/scripts/local-top/systemdecrypt + fetch_remote systemdecrypt-hook + mv systemdecrypt-hook /target/etc/initramfs-tools/hooks/systemdecrypt + chmod 755 /target/etc/initramfs-tools/scripts/local-top/systemdecrypt /target/etc/initramfs-tools/hooks/systemdecrypt chroot /target update-initramfs -u fi python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: staged' diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh index bfe1c7db..db0e967d 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh @@ -41,12 +41,13 @@ if [ ! -e /tmp/installdisk ]; then fi sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then + lukspass=$(head -c 66 < /dev/urandom |base64 -w0) + run_remote_python addcrypt if ! grep '#CRYPTBOOT' /autoinstall.yaml > /dev/null; then echo "****Encrypted boot requested, but the user-data does not have a hook to enable,halting install" > /dev/console [ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but the user-data does not have a hook to enable,halting install" >> $(cat /tmp/autoconsdev)) while :; do sleep 86400; done fi - lukspass=$(head -c 66 < /dev/urandom |base64 -w0) sed -i s!%%CRYPTPASS%%!$lukspass! /autoinstall.yaml sed -i s!'#CRYPTBOOT'!! /autoinstall.yaml echo -n $lukspass > /etc/confluent_lukspass diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt new file mode 100644 index 00000000..6f0cbaed --- /dev/null +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt @@ -0,0 +1,17 @@ +#!/bin/sh +case $1 in +prereqs) + echo + exit 0 + ;; +esac + +systemdecryptnow() { +. /usr/lib/cryptsetup/functions +local CRYPTTAB_SOURCE=$(awk '{print $2}' /systemdecrypt/crypttab) +local CRYPTTAB_NAME=$(awk '{print $1}' /systemdecrypt/crypttab) +crypttab_resolve_source +/lib/systemd/systemd-cryptsetup attach "${CRYPTTAB_NAME}" "${CRYPTTAB_SOURCE}" none tpm2-device=auto +} + +systemdecryptnow diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook new file mode 100644 index 00000000..48c9d16d --- /dev/null +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook @@ -0,0 +1,22 @@ +#!/bin/sh +case "$1" in + prereqs) + echo + exit 0 + ;; +esac + +. 
/usr/share/initramfs-tools/hook-functions +mkdir -p $DESTDIR/systemdecrypt +copy_exec /lib/systemd/systemd-cryptsetup /lib/systemd +for i in /lib/x86_64-linux-gnu/libtss2* +do + copy_exec ${i} /lib/x86_64-linux-gnu +done +mkdir -p $DESTDIR/scripts/local-top + +echo /scripts/local-top/systemdecrypt >> $DESTDIR/scripts/local-top/ORDER + +if [ -f $DESTDIR/cryptroot/crypttab ]; then + mv $DESTDIR/cryptroot/crypttab $DESTDIR/systemdecrypt/crypttab +fi From f482d2ead993ae651f742e43b5977406e5d64cb4 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 11:35:49 -0400 Subject: [PATCH 20/51] Amend crypt hook check The comment was changed, check for password instead. --- confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh index db0e967d..02402a99 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh @@ -43,7 +43,7 @@ sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then lukspass=$(head -c 66 < /dev/urandom |base64 -w0) run_remote_python addcrypt - if ! grep '#CRYPTBOOT' /autoinstall.yaml > /dev/null; then + if ! grep 'pasword:' /autoinstall.yaml > /dev/null; then echo "****Encrypted boot requested, but the user-data does not have a hook to enable,halting install" > /dev/console [ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but the user-data does not have a hook to enable,halting install" >> $(cat /tmp/autoconsdev)) while :; do sleep 86400; done From 1ddf735590bd6479dc7fa797a88869adaa963283 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 11:50:53 -0400 Subject: [PATCH 21/51] Fix omitted argument to addcrypt --- confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh index 02402a99..bd9d1d60 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh @@ -42,7 +42,7 @@ fi sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then lukspass=$(head -c 66 < /dev/urandom |base64 -w0) - run_remote_python addcrypt + run_remote_python addcrypt "$lukspass" if ! 
grep 'pasword:' /autoinstall.yaml > /dev/null; then echo "****Encrypted boot requested, but the user-data does not have a hook to enable,halting install" > /dev/console [ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but the user-data does not have a hook to enable,halting install" >> $(cat /tmp/autoconsdev)) From c1747ad24ca961b10df0e94819b58861f7b2f0bb Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 11:54:10 -0400 Subject: [PATCH 22/51] Correct spelling of key for luks check --- confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh index bd9d1d60..77a16906 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh @@ -43,7 +43,7 @@ sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then lukspass=$(head -c 66 < /dev/urandom |base64 -w0) run_remote_python addcrypt "$lukspass" - if ! grep 'pasword:' /autoinstall.yaml > /dev/null; then + if ! grep 'password:' /autoinstall.yaml > /dev/null; then echo "****Encrypted boot requested, but the user-data does not have a hook to enable,halting install" > /dev/console [ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but the user-data does not have a hook to enable,halting install" >> $(cat /tmp/autoconsdev)) while :; do sleep 86400; done From c563f48c71acfd9ffc44c1f88caef339b527de9d Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 12:30:41 -0400 Subject: [PATCH 23/51] Fix assignment of lukspass variable. 
--- confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh index 998f7bda..28d45e41 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh @@ -105,7 +105,7 @@ if [ -f /etc/confluent_lukspass ]; then wall "Unable to find $CRYPTTAB_SOURCE, halting install" while :; do sleep 86400; done fi - $lukspass=$(cat /etc/confluent_lukspass) + lukspass=$(cat /etc/confluent_lukspass) chroot /target apt install libtss2-rc0 PASSWORD=$(lukspass) chroot /target systemd-cryptenroll --tpm2-device=auto $CRYPTTAB_SOURCE fetch_remote systemdecrypt From 7a602f58b2e1b62a0c18b4f5e9b72d20817bbf5d Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 13:47:13 -0400 Subject: [PATCH 24/51] Fixes for ubuntu profile tpm support --- confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh index 28d45e41..4af3a01f 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh @@ -107,7 +107,7 @@ if [ -f /etc/confluent_lukspass ]; then fi lukspass=$(cat /etc/confluent_lukspass) chroot /target apt install libtss2-rc0 - PASSWORD=$(lukspass) chroot /target systemd-cryptenroll --tpm2-device=auto $CRYPTTAB_SOURCE + PASSWORD=$lukspass chroot /target systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs="" $CRYPTTAB_SOURCE fetch_remote systemdecrypt mv systemdecrypt /target/etc/initramfs-tools/scripts/local-top/systemdecrypt fetch_remote systemdecrypt-hook From 2df902e80e2df7782f6c594c3195862d1c04ea69 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 14:07:54 -0400 Subject: [PATCH 25/51] Remove luks password from argv Pass the luks password by environment variable instead. --- .../ubuntu22.04/profiles/default/scripts/addcrypt | 4 ++-- .../ubuntu22.04/profiles/default/scripts/pre.sh | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt index 4f2ae905..750753c1 100644 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt @@ -1,11 +1,11 @@ import yaml -import sys +import os ainst = {} with open('/autoinstall.yaml', 'r') as allin: ainst = yaml.safe_load(allin) -ainst['storage']['layout']['password'] = sys.argv[1] +ainst['storage']['layout']['password'] = os.environ['lukspass'] with open('/autoinstall.yaml', 'w') as allout: yaml.safe_dump(ainst, allout) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh index 77a16906..4ec3f822 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh @@ -42,7 +42,8 @@ fi sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! 
/autoinstall.yaml if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then lukspass=$(head -c 66 < /dev/urandom |base64 -w0) - run_remote_python addcrypt "$lukspass" + export lukspass + run_remote_python addcrypt if ! grep 'password:' /autoinstall.yaml > /dev/null; then echo "****Encrypted boot requested, but the user-data does not have a hook to enable,halting install" > /dev/console [ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but the user-data does not have a hook to enable,halting install" >> $(cat /tmp/autoconsdev)) From 332068074d1b93d1b4b9a85e38643ba0d93f85ba Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 16:54:58 -0400 Subject: [PATCH 26/51] Extend systemdecrypt hook to support Ubuntu 24.04 Ubuntu 240.4 systemd-cryptsetup now has an external dependency. --- .../ubuntu22.04/profiles/default/scripts/systemdecrypt-hook | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook index 48c9d16d..ee602c7c 100644 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook @@ -13,6 +13,10 @@ for i in /lib/x86_64-linux-gnu/libtss2* do copy_exec ${i} /lib/x86_64-linux-gnu done +if [ -f /lib/x86_64-linux-gnu/cryptsetup/libcryptsetup-token-systemd-tpm2.so ]; then + mkdir -p $DESTDIR/lib/x86_64-linux-gnu/cryptsetup + copy_exec /lib/x86_64-linux-gnu/cryptsetup/libcryptsetup-token-systemd-tpm2.so /lib/x86_64-linux-gnu/cryptsetup +fi mkdir -p $DESTDIR/scripts/local-top echo /scripts/local-top/systemdecrypt >> $DESTDIR/scripts/local-top/ORDER From 1af898dcb8a07ccf537bf41cb592066ce45fc0e2 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 17:43:51 -0400 Subject: [PATCH 27/51] Fix encryptboot on EL8/EL9 --- confluent_osdeploy/el8/profiles/default/scripts/pre.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/el8/profiles/default/scripts/pre.sh b/confluent_osdeploy/el8/profiles/default/scripts/pre.sh index cd831360..89d989b8 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/el8/profiles/default/scripts/pre.sh @@ -114,7 +114,7 @@ confluentpython /etc/confluent/apiclient /confluent-public/os/$confluent_profile grep '^%include /tmp/partitioning' /tmp/kickstart.* > /dev/null || rm /tmp/installdisk if [ -e /tmp/installdisk -a ! -e /tmp/partitioning ]; then INSTALLDISK=$(cat /tmp/installdisk) - sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e s/%%LUKSHOOK%%/$LUKSPARTY/ /tmp/partitioning.template > /tmp/partitioning + sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e "s/%%LUKSHOOK%%/$LUKSPARTY/" /tmp/partitioning.template > /tmp/partitioning vgchange -a n >& /dev/null wipefs -a -f /dev/$INSTALLDISK >& /dev/null fi From bee9f1819717b2c5475671d9d77293f1bf11ca47 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 26 Jul 2024 17:59:42 -0400 Subject: [PATCH 28/51] Tolerate / in the apikey for LUKS setup The apikey is highly likely to have a /, and so we need to use something not in the base64 alphabet as a delimiter. 
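A quick way to verify that claim: the standard base64 alphabet is A-Z, a-z, 0-9, '+' and '/', so '/' can legitimately appear in the encoded passphrase while '!' never can, which is what makes '!' a safe sed delimiter here. Illustrative check (the length is arbitrary and the values random):

    import base64
    import os

    sample = base64.b64encode(os.urandom(66)).decode()
    assert '!' not in sample   # '!' is outside the base64 alphabet
    print('/' in sample)       # often True, which would break an s/.../.../ expression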
--- confluent_osdeploy/el8/profiles/default/scripts/pre.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/el8/profiles/default/scripts/pre.sh b/confluent_osdeploy/el8/profiles/default/scripts/pre.sh index 89d989b8..4deff814 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/el8/profiles/default/scripts/pre.sh @@ -114,7 +114,7 @@ confluentpython /etc/confluent/apiclient /confluent-public/os/$confluent_profile grep '^%include /tmp/partitioning' /tmp/kickstart.* > /dev/null || rm /tmp/installdisk if [ -e /tmp/installdisk -a ! -e /tmp/partitioning ]; then INSTALLDISK=$(cat /tmp/installdisk) - sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e "s/%%LUKSHOOK%%/$LUKSPARTY/" /tmp/partitioning.template > /tmp/partitioning + sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e "s!%%LUKSHOOK%%!$LUKSPARTY!" /tmp/partitioning.template > /tmp/partitioning vgchange -a n >& /dev/null wipefs -a -f /dev/$INSTALLDISK >& /dev/null fi From 329f2b4485fc26005525bd11616cc95d1641e0d3 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 29 Jul 2024 10:17:14 -0400 Subject: [PATCH 29/51] Amend cryptboot implementation for Ubuntu 22/24, EL8/EL9 Provide mechanism for administrator to place a custom key for potential interactive recovery into /var/lib/confluent/private/os//pending/luks.key If not provided, generate a unique one for each install. Either way, persist the key in /etc/confluent/luks.key, to facilitate later resealing if the user wants (clevis nor systemd prior to 256 supports unlock via TPM2, so keyfile is required for now). Migrating to otherwise escrowed passphrases and/or sealing to specific TPMs will be left to operators and/or third parties. --- confluent_osdeploy/el8/profiles/default/scripts/pre.sh | 10 ++++++++-- .../el8/profiles/default/scripts/tpm_luks.sh | 5 +++-- .../ubuntu22.04/profiles/default/scripts/post.sh | 2 ++ .../ubuntu22.04/profiles/default/scripts/pre.sh | 7 +++++-- 4 files changed, 18 insertions(+), 6 deletions(-) diff --git a/confluent_osdeploy/el8/profiles/default/scripts/pre.sh b/confluent_osdeploy/el8/profiles/default/scripts/pre.sh index 4deff814..880d22ac 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/el8/profiles/default/scripts/pre.sh @@ -90,8 +90,14 @@ touch /tmp/cryptpkglist touch /tmp/pkglist touch /tmp/addonpackages if [ "$cryptboot" == "tpm2" ]; then - LUKSPARTY="--encrypted --passphrase=$(cat /etc/confluent/confluent.apikey)" - echo $cryptboot >> /tmp/cryptboot + lukspass=$(python3 /opt/confluent/bin/apiclient /confluent-api/self/profileprivate/pending/luks.key 2> /dev/null) + if [ -z "$lukspass" ]; then + lukspass=$(python3 -c 'import os;import base64;print(base64.b64encode(os.urandom(66)).decode())') + fi + echo $lukspass > /etc/confluent/luks.key + chmod 000 /etc/confluent/luks.key + LUKSPARTY="--encrypted --passphrase=$lukspass" + echo $cryptboot >> /tmp/cryptboot echo clevis-dracut >> /tmp/cryptpkglist fi diff --git a/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh b/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh index df9c857f..c457ffd4 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh +++ b/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh @@ -1,4 +1,5 @@ #!/bin/sh cryptdisk=$(blkid -t TYPE="crypto_LUKS"|sed -e s/:.*//) -clevis luks bind -f -d $cryptdisk -k - tpm2 '{}' < /etc/confluent/confluent.apikey -cryptsetup luksRemoveKey $cryptdisk < /etc/confluent/confluent.apikey +clevis luks bind -f -d 
$cryptdisk -k /etc/cofluent/luks.key tpm2 '{}' +chmod 000 /etc/confluent/luks.key +#cryptsetup luksRemoveKey $cryptdisk < /etc/confluent/confluent.apikey diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh index 4af3a01f..a86695ca 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh @@ -105,6 +105,8 @@ if [ -f /etc/confluent_lukspass ]; then wall "Unable to find $CRYPTTAB_SOURCE, halting install" while :; do sleep 86400; done fi + cp /etc/confluent_lukspass /target/etc/confluent/luks.key + chmod 000 /target/etc/confluent/luks.key lukspass=$(cat /etc/confluent_lukspass) chroot /target apt install libtss2-rc0 PASSWORD=$lukspass chroot /target systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs="" $CRYPTTAB_SOURCE diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh index 4ec3f822..5b609565 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh @@ -41,7 +41,10 @@ if [ ! -e /tmp/installdisk ]; then fi sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then - lukspass=$(head -c 66 < /dev/urandom |base64 -w0) + lukspass=$(python3 /opt/confluent/bin/apiclient /confluent-api/self/profileprivate/pending/luks.key 2> /dev/null) + if [ -z "$lukspass" ]; then + lukspass=$(head -c 66 < /dev/urandom |base64 -w0) + fi export lukspass run_remote_python addcrypt if ! grep 'password:' /autoinstall.yaml > /dev/null; then @@ -52,7 +55,7 @@ if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "n sed -i s!%%CRYPTPASS%%!$lukspass! /autoinstall.yaml sed -i s!'#CRYPTBOOT'!! /autoinstall.yaml echo -n $lukspass > /etc/confluent_lukspass - + chmod 000 /etc/confluent_lukspass fi ) & tail --pid $! 
-n 0 -F /var/log/confluent/confluent-pre.log > /dev/console From e6dc383d2598b5c6a9d851b9ed7b5894a25e0532 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 29 Jul 2024 11:22:07 -0400 Subject: [PATCH 30/51] Fix mistake in EL8/EL9 LUKS --- confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh b/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh index c457ffd4..359c46f6 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh +++ b/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh @@ -1,5 +1,5 @@ #!/bin/sh cryptdisk=$(blkid -t TYPE="crypto_LUKS"|sed -e s/:.*//) -clevis luks bind -f -d $cryptdisk -k /etc/cofluent/luks.key tpm2 '{}' +clevis luks bind -f -d $cryptdisk -k - tpm2 '{}' < /etc/confluent/luks.key chmod 000 /etc/confluent/luks.key #cryptsetup luksRemoveKey $cryptdisk < /etc/confluent/confluent.apikey From 1c4f1ae8175bcd03c8aa0e2bae6507ad23466ceb Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 29 Jul 2024 15:21:10 -0400 Subject: [PATCH 31/51] Try to add ntp and timezones to Ubuntu scripted install --- .../profiles/default/scripts/mergetime | 26 +++++++++++++++++++ .../profiles/default/scripts/pre.sh | 1 + 2 files changed, 27 insertions(+) create mode 100644 confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime new file mode 100644 index 00000000..0cacc1e8 --- /dev/null +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime @@ -0,0 +1,26 @@ +#!/usr/bin/python3 +import yaml +import os + +ainst = {} +with open('/autoinstall.yaml', 'r') as allin: + ainst = yaml.safe_load(allin) + +tz = None +ntps = [] +with open('/etc/confluent/confluent.deploycfg', 'r') as confluentdeploycfg: + dcfg = yaml.safe_load(confluentdeploycfg) + tz = dcfg['timezone'] + ntps = dcfg.get('ntpservers', []) + +if ntps and not ainst.get('ntp', None): + ainst['ntp'] = {} + ainst['ntp']['enabled'] = True + ainst['servers'] = ntps + +if tz and not ainst.get('timezone'): + ainst['timezone'] = tz + +with open('/autoinstall.yaml', 'w') as allout: + yaml.safe_dump(ainst, allout) + diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh index 5b609565..ad55120a 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh @@ -40,6 +40,7 @@ if [ ! -e /tmp/installdisk ]; then python3 /custom-installation/getinstalldisk fi sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! 
/autoinstall.yaml +run_remote_python mergetime if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then lukspass=$(python3 /opt/confluent/bin/apiclient /confluent-api/self/profileprivate/pending/luks.key 2> /dev/null) if [ -z "$lukspass" ]; then From 71ca9ef76c7abed0752931d1545b447288f3b7c0 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 29 Jul 2024 15:57:34 -0400 Subject: [PATCH 32/51] Fix path to ntp servers in user-data mod for ubuntu --- .../ubuntu22.04/profiles/default/scripts/mergetime | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime index 0cacc1e8..7edb2632 100644 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime @@ -16,7 +16,7 @@ with open('/etc/confluent/confluent.deploycfg', 'r') as confluentdeploycfg: if ntps and not ainst.get('ntp', None): ainst['ntp'] = {} ainst['ntp']['enabled'] = True - ainst['servers'] = ntps + ainst['ntp']['servers'] = ntps if tz and not ainst.get('timezone'): ainst['timezone'] = tz From 89bd7c6053c0cd688e9b009b3cb5f07d58a5ecae Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Thu, 1 Aug 2024 09:40:39 -0400 Subject: [PATCH 33/51] Force load IB/OPA modules in case of IB boot Ubuntu diskless was not working with boot over IB --- .../initramfs/scripts/init-premount/confluent | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/confluent_osdeploy/ubuntu20.04-diskless/initramfs/scripts/init-premount/confluent b/confluent_osdeploy/ubuntu20.04-diskless/initramfs/scripts/init-premount/confluent index 2f7094b9..a4ca41cf 100644 --- a/confluent_osdeploy/ubuntu20.04-diskless/initramfs/scripts/init-premount/confluent +++ b/confluent_osdeploy/ubuntu20.04-diskless/initramfs/scripts/init-premount/confluent @@ -58,6 +58,10 @@ if ! grep console= /proc/cmdline > /dev/null; then echo "Automatic console configured for $autocons" fi echo sshd:x:30:30:SSH User:/var/empty/sshd:/sbin/nologin >> /etc/passwd +modprobe ib_ipoib +modprobe ib_umad +modprobe hfi1 +modprobe mlx5_ib cd /sys/class/net for nic in *; do ip link set $nic up From acce4de739c54bd1dc7c87bac85617274da0971f Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 2 Aug 2024 11:57:04 -0400 Subject: [PATCH 34/51] Add support for an OpenBMC modification While stock OpenBmc does not care about subprotocols, some implementations use it as a carrier for the XSRF-TOKEN. Since base OpenBmc ignores it, we just offer it to any implementation just in case. 
--- confluent_server/confluent/plugins/console/openbmc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_server/confluent/plugins/console/openbmc.py b/confluent_server/confluent/plugins/console/openbmc.py index 17acae7c..8677ce17 100644 --- a/confluent_server/confluent/plugins/console/openbmc.py +++ b/confluent_server/confluent/plugins/console/openbmc.py @@ -141,7 +141,7 @@ class TsmConsole(conapi.Console): bmc = prefix + ']' self.ws = WrappedWebSocket(host=bmc) self.ws.set_verify_callback(kv) - self.ws.connect('wss://{0}/console0'.format(self.bmc), host=bmc, cookie='XSRF-TOKEN={0}; SESSION={1}'.format(wc.cookies['XSRF-TOKEN'], wc.cookies['SESSION'])) + self.ws.connect('wss://{0}/console0'.format(self.bmc), host=bmc, cookie='XSRF-TOKEN={0}; SESSION={1}'.format(wc.cookies['XSRF-TOKEN'], wc.cookies['SESSION']), subprotocols=[wc.cookies['XSRF-TOKEN']]) self.connected = True eventlet.spawn_n(self.recvdata) return From 4b6d41d2f82e1b0935542f9173a82705f8da1f6e Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Fri, 2 Aug 2024 17:35:39 -0400 Subject: [PATCH 35/51] Begin work to support V4 Lenovo servers V4 Lenovo servers will have XCC3, and will have differences and mark an unambiguously redfish capable onboarding process. For now identify XCC3 variants and mark them, stubbing them to the xcc handler. An XCC3 handler will be made basing on the generic redfishbmc handler with accomodations for XCC specific data (e.g. DeviceDescription attributes and the Lenovo default user/password choice). --- confluent_server/confluent/discovery/core.py | 3 +++ .../confluent/discovery/protocols/ssdp.py | 11 +++++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/confluent_server/confluent/discovery/core.py b/confluent_server/confluent/discovery/core.py index dfb50b9f..bb4c99df 100644 --- a/confluent_server/confluent/discovery/core.py +++ b/confluent_server/confluent/discovery/core.py @@ -113,6 +113,7 @@ nodehandlers = { 'service:lenovo-smm': smm, 'service:lenovo-smm2': smm, 'lenovo-xcc': xcc, + 'lenovo-xcc3': xcc, 'service:management-hardware.IBM:integrated-management-module2': imm, 'pxe-client': pxeh, 'onie-switch': None, @@ -132,6 +133,7 @@ servicenames = { 'service:lenovo-smm2': 'lenovo-smm2', 'affluent-switch': 'affluent-switch', 'lenovo-xcc': 'lenovo-xcc', + 'lenovo-xcc3': 'lenovo-xcc3', #'openbmc': 'openbmc', 'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2', 'service:io-device.Lenovo:management-module': 'lenovo-switch', @@ -147,6 +149,7 @@ servicebyname = { 'lenovo-smm2': 'service:lenovo-smm2', 'affluent-switch': 'affluent-switch', 'lenovo-xcc': 'lenovo-xcc', + 'lenovo-xcc3': 'lenovo-xcc3', 'lenovo-imm2': 'service:management-hardware.IBM:integrated-management-module2', 'lenovo-switch': 'service:io-device.Lenovo:management-module', 'thinkagile-storage': 'service:thinkagile-storagebmc', diff --git a/confluent_server/confluent/discovery/protocols/ssdp.py b/confluent_server/confluent/discovery/protocols/ssdp.py index 12ec4ba7..34b4f6d0 100644 --- a/confluent_server/confluent/discovery/protocols/ssdp.py +++ b/confluent_server/confluent/discovery/protocols/ssdp.py @@ -431,18 +431,25 @@ def check_fish(urldata, port=443, verifycallback=None): url, data = urldata try: wc = webclient.SecureHTTPConnection(_get_svrip(data), port, verifycallback=verifycallback, timeout=1.5) - peerinfo = wc.grab_json_response(url) + peerinfo = wc.grab_json_response(url, headers={'Accept': 'application/json'}) except socket.error: return None if url == 
'/DeviceDescription.json': + if not peerinfo: + return None try: peerinfo = peerinfo[0] + except KeyError: + peerinfo['xcc-variant'] = '3' + except IndexError: ++ return None + try: myuuid = peerinfo['node-uuid'].lower() if '-' not in myuuid: myuuid = '-'.join([myuuid[:8], myuuid[8:12], myuuid[12:16], myuuid[16:20], myuuid[20:]]) data['uuid'] = myuuid data['attributes'] = peerinfo - data['services'] = ['lenovo-xcc'] + data['services'] = ['lenovo-xcc'] if 'xcc-variant' not in peerinfo else ['lenovo-xcc' + peerinfo['xcc-variant']] return data except (IndexError, KeyError): return None From e9d4174ce5372e3538a539b3661d200ab04202d8 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 5 Aug 2024 08:35:10 -0400 Subject: [PATCH 36/51] Reapply "Add MegaRAC discovery support for recent MegaRAC" This reverts commit 9d979256eb2c8f96e6a2c334beb57a504eb30f02. --- confluent_server/confluent/discovery/core.py | 9 +- .../confluent/discovery/handlers/megarac.py | 51 ++++ .../discovery/handlers/redfishbmc.py | 269 ++++++++++++++++++ .../confluent/discovery/protocols/ssdp.py | 52 +++- 4 files changed, 366 insertions(+), 15 deletions(-) create mode 100644 confluent_server/confluent/discovery/handlers/megarac.py create mode 100644 confluent_server/confluent/discovery/handlers/redfishbmc.py diff --git a/confluent_server/confluent/discovery/core.py b/confluent_server/confluent/discovery/core.py index bb4c99df..b734cece 100644 --- a/confluent_server/confluent/discovery/core.py +++ b/confluent_server/confluent/discovery/core.py @@ -74,6 +74,7 @@ import confluent.discovery.handlers.tsm as tsm import confluent.discovery.handlers.pxe as pxeh import confluent.discovery.handlers.smm as smm import confluent.discovery.handlers.xcc as xcc +import confluent.discovery.handlers.megarac as megarac import confluent.exceptions as exc import confluent.log as log import confluent.messages as msg @@ -114,6 +115,7 @@ nodehandlers = { 'service:lenovo-smm2': smm, 'lenovo-xcc': xcc, 'lenovo-xcc3': xcc, + 'megarac-bmc': megarac, 'service:management-hardware.IBM:integrated-management-module2': imm, 'pxe-client': pxeh, 'onie-switch': None, @@ -134,6 +136,7 @@ servicenames = { 'affluent-switch': 'affluent-switch', 'lenovo-xcc': 'lenovo-xcc', 'lenovo-xcc3': 'lenovo-xcc3', + 'megarac-bmc': 'megarac-bmc', #'openbmc': 'openbmc', 'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2', 'service:io-device.Lenovo:management-module': 'lenovo-switch', @@ -150,6 +153,7 @@ servicebyname = { 'affluent-switch': 'affluent-switch', 'lenovo-xcc': 'lenovo-xcc', 'lenovo-xcc3': 'lenovo-xcc3', + 'megarac-bmc': 'megarac-bmc', 'lenovo-imm2': 'service:management-hardware.IBM:integrated-management-module2', 'lenovo-switch': 'service:io-device.Lenovo:management-module', 'thinkagile-storage': 'service:thinkagile-storagebmc', @@ -456,7 +460,7 @@ def iterate_addrs(addrs, countonly=False): yield 1 return yield addrs - + def _parameterize_path(pathcomponents): listrequested = False childcoll = True @@ -545,7 +549,7 @@ def handle_api_request(configmanager, inputdata, operation, pathcomponents): if len(pathcomponents) > 2: raise Exception('TODO') currsubs = get_subscriptions() - return [msg.ChildCollection(x) for x in currsubs] + return [msg.ChildCollection(x) for x in currsubs] elif operation == 'retrieve': return handle_read_api_request(pathcomponents) elif (operation in ('update', 'create') and @@ -1706,3 +1710,4 @@ if __name__ == '__main__': start_detection() while True: eventlet.sleep(30) + diff --git 
a/confluent_server/confluent/discovery/handlers/megarac.py b/confluent_server/confluent/discovery/handlers/megarac.py new file mode 100644 index 00000000..d7d8786a --- /dev/null +++ b/confluent_server/confluent/discovery/handlers/megarac.py @@ -0,0 +1,51 @@ +# Copyright 2024 Lenovo +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import confluent.discovery.handlers.redfishbmc as redfishbmc +import eventlet.support.greendns + + +getaddrinfo = eventlet.support.greendns.getaddrinfo + + +class NodeHandler(redfishbmc.NodeHandler): + + def get_firmware_default_account_info(self): + return ('admin', 'admin') + + +def remote_nodecfg(nodename, cfm): + cfg = cfm.get_node_attributes( + nodename, 'hardwaremanagement.manager') + ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get( + 'value', None) + ipaddr = ipaddr.split('/', 1)[0] + ipaddr = getaddrinfo(ipaddr, 0)[0][-1] + if not ipaddr: + raise Exception('Cannot remote configure a system without known ' + 'address') + info = {'addresses': [ipaddr]} + nh = NodeHandler(info, cfm) + nh.config(nodename) + + +if __name__ == '__main__': + import confluent.config.configmanager as cfm + c = cfm.ConfigManager(None) + import sys + info = {'addresses': [[sys.argv[1]]]} + print(repr(info)) + testr = NodeHandler(info, c) + testr.config(sys.argv[2]) + diff --git a/confluent_server/confluent/discovery/handlers/redfishbmc.py b/confluent_server/confluent/discovery/handlers/redfishbmc.py new file mode 100644 index 00000000..eed401de --- /dev/null +++ b/confluent_server/confluent/discovery/handlers/redfishbmc.py @@ -0,0 +1,269 @@ +# Copyright 2024 Lenovo +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import confluent.discovery.handlers.generic as generic +import confluent.exceptions as exc +import confluent.netutil as netutil +import confluent.util as util +import eventlet +import eventlet.support.greendns +import json +try: + from urllib import urlencode +except ImportError: + from urllib.parse import urlencode + +getaddrinfo = eventlet.support.greendns.getaddrinfo + +webclient = eventlet.import_patched('pyghmi.util.webclient') + +def get_host_interface_urls(wc, mginfo): + returls = [] + hifurl = mginfo.get('HostInterfaces', {}).get('@odata.id', None) + if not hifurl: + return None + hifinfo = wc.grab_json_response(hifurl) + hifurls = hifinfo.get('Members', []) + for hifurl in hifurls: + hifurl = hifurl['@odata.id'] + hifinfo = wc.grab_json_response(hifurl) + acturl = hifinfo.get('ManagerEthernetInterface', {}).get('@odata.id', None) + if acturl: + returls.append(acturl) + return returls + + +class NodeHandler(generic.NodeHandler): + devname = 'BMC' + + def __init__(self, info, configmanager): + self.trieddefault = None + self.targuser = None + self.curruser = None + self.currpass = None + self.targpass = None + self.nodename = None + self.csrftok = None + self.channel = None + self.atdefault = True + super(NodeHandler, self).__init__(info, configmanager) + + def get_firmware_default_account_info(self): + raise Exception('This must be subclassed') + + def scan(self): + c = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert) + i = c.grab_json_response('/redfish/v1/') + uuid = i.get('UUID', None) + if uuid: + self.info['uuid'] = uuid.lower() + + def validate_cert(self, certificate): + # broadly speaking, merely checks consistency moment to moment, + # but if https_cert gets stricter, this check means something + fprint = util.get_fingerprint(self.https_cert) + return util.cert_matches(fprint, certificate) + + def _get_wc(self): + defuser, defpass = self.get_firmware_default_account_info() + wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert) + wc.set_basic_credentials(defuser, defpass) + wc.set_header('Content-Type', 'application/json') + authmode = 0 + if not self.trieddefault: + rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if status == 403: + self.trieddefault = True + chgurl = None + rsp = json.loads(rsp) + currerr = rsp.get('error', {}) + ecode = currerr.get('code', None) + if ecode.endswith('PasswordChangeRequired'): + for einfo in currerr.get('@Message.ExtendedInfo', []): + if einfo.get('MessageId', None).endswith('PasswordChangeRequired'): + for msgarg in einfo.get('MessageArgs'): + chgurl = msgarg + break + if chgurl: + if self.targpass == defpass: + raise Exception("Must specify a non-default password to onboard this BMC") + wc.set_header('If-Match', '*') + cpr = wc.grab_json_response_with_status(chgurl, {'Password': self.targpass}, method='PATCH') + if cpr[1] >= 200 and cpr[1] < 300: + self.curruser = defuser + self.currpass = self.targpass + wc.set_basic_credentials(self.curruser, self.currpass) + _, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + tries = 10 + while status >= 300 and tries: + eventlet.sleep(1) + _, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + return wc + + if status > 400: + self.trieddefault = True + if status == 401: + wc.set_basic_credentials(self.DEFAULT_USER, self.targpass) + rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if status == 200: # Default user still, but targpass + 
self.currpass = self.targpass + self.curruser = defuser + return wc + elif self.targuser != defuser: + wc.set_basic_credentials(self.targuser, self.targpass) + rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if status != 200: + raise Exception("Target BMC does not recognize firmware default credentials nor the confluent stored credential") + else: + self.curruser = defuser + self.currpass = defpass + return wc + if self.curruser: + wc.set_basic_credentials(self.curruser, self.currpass) + rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if status != 200: + return None + return wc + wc.set_basic_credentials(self.targuser, self.targpass) + rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if status != 200: + return None + self.curruser = self.targuser + self.currpass = self.targpass + return wc + + def config(self, nodename): + self.nodename = nodename + creds = self.configmanager.get_node_attributes( + nodename, ['secret.hardwaremanagementuser', + 'secret.hardwaremanagementpassword', + 'hardwaremanagement.manager', 'hardwaremanagement.method', 'console.method'], + True) + cd = creds.get(nodename, {}) + defuser, defpass = self.get_firmware_default_account_info() + user, passwd, _ = self.get_node_credentials( + nodename, creds, defuser, defpass) + user = util.stringify(user) + passwd = util.stringify(passwd) + self.targuser = user + self.targpass = passwd + wc = self._get_wc() + srvroot, status = wc.grab_json_response_with_status('/redfish/v1/') + curruserinfo = {} + authupdate = {} + wc.set_header('Content-Type', 'application/json') + if user != self.curruser: + authupdate['UserName'] = user + if passwd != self.currpass: + authupdate['Password'] = passwd + if authupdate: + targaccturl = None + asrv = srvroot.get('AccountService', {}).get('@odata.id') + rsp, status = wc.grab_json_response_with_status(asrv) + accts = rsp.get('Accounts', {}).get('@odata.id') + rsp, status = wc.grab_json_response_with_status(accts) + accts = rsp.get('Members', []) + for accturl in accts: + accturl = accturl.get('@odata.id', '') + if accturl: + rsp, status = wc.grab_json_response_with_status(accturl) + if rsp.get('UserName', None) == self.curruser: + targaccturl = accturl + break + else: + raise Exception("Unable to identify Account URL to modify on this BMC") + rsp, status = wc.grab_json_response_with_status(targaccturl, authupdate, method='PATCH') + if status >= 300: + raise Exception("Failed attempting to update credentials on BMC") + wc.set_basic_credentials(user, passwd) + _, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + tries = 10 + while tries and status >= 300: + tries -= 1 + eventlet.sleep(1.0) + _, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if ('hardwaremanagement.manager' in cd and + cd['hardwaremanagement.manager']['value'] and + not cd['hardwaremanagement.manager']['value'].startswith( + 'fe80::')): + newip = cd['hardwaremanagement.manager']['value'] + newip = newip.split('/', 1)[0] + newipinfo = getaddrinfo(newip, 0)[0] + newip = newipinfo[-1][0] + if ':' in newip: + raise exc.NotImplementedException('IPv6 remote config TODO') + mgrs = srvroot['Managers']['@odata.id'] + rsp = wc.grab_json_response(mgrs) + if len(rsp['Members']) != 1: + raise Exception("Can not handle multiple Managers") + mgrurl = rsp['Members'][0]['@odata.id'] + mginfo = wc.grab_json_response(mgrurl) + hifurls = get_host_interface_urls(wc, mginfo) + mgtnicinfo = mginfo['EthernetInterfaces']['@odata.id'] + 
mgtnicinfo = wc.grab_json_response(mgtnicinfo) + mgtnics = [x['@odata.id'] for x in mgtnicinfo.get('Members', [])] + actualnics = [] + for candnic in mgtnics: + if candnic in hifurls: + continue + actualnics.append(candnic) + if len(actualnics) != 1: + raise Exception("Multi-interface BMCs are not supported currently") + currnet = wc.grab_json_response(actualnics[0]) + netconfig = netutil.get_nic_config(self.configmanager, nodename, ip=newip) + newconfig = { + "Address": newip, + "SubnetMask": netutil.cidr_to_mask(netconfig['prefix']), + } + newgw = netconfig['ipv4_gateway'] + if newgw: + newconfig['Gateway'] = newgw + else: + newconfig['Gateway'] = newip # required property, set to self just to have a value + for net in currnet.get("IPv4Addresses", []): + if net["Address"] == newip and net["SubnetMask"] == newconfig['SubnetMask'] and (not newgw or newconfig['Gateway'] == newgw): + break + else: + wc.set_header('If-Match', '*') + rsp, status = wc.grab_json_response_with_status(actualnics[0], {'IPv4StaticAddresses': [newconfig]}, method='PATCH') + elif self.ipaddr.startswith('fe80::'): + self.configmanager.set_node_attributes( + {nodename: {'hardwaremanagement.manager': self.ipaddr}}) + else: + raise exc.TargetEndpointUnreachable( + 'hardwaremanagement.manager must be set to desired address (No IPv6 Link Local detected)') + + +def remote_nodecfg(nodename, cfm): + cfg = cfm.get_node_attributes( + nodename, 'hardwaremanagement.manager') + ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get( + 'value', None) + ipaddr = ipaddr.split('/', 1)[0] + ipaddr = getaddrinfo(ipaddr, 0)[0][-1] + if not ipaddr: + raise Exception('Cannot remote configure a system without known ' + 'address') + info = {'addresses': [ipaddr]} + nh = NodeHandler(info, cfm) + nh.config(nodename) + +if __name__ == '__main__': + import confluent.config.configmanager as cfm + c = cfm.ConfigManager(None) + import sys + info = {'addresses': [[sys.argv[1]]] } + print(repr(info)) + testr = NodeHandler(info, c) + testr.config(sys.argv[2]) diff --git a/confluent_server/confluent/discovery/protocols/ssdp.py b/confluent_server/confluent/discovery/protocols/ssdp.py index 34b4f6d0..45d1e1f3 100644 --- a/confluent_server/confluent/discovery/protocols/ssdp.py +++ b/confluent_server/confluent/discovery/protocols/ssdp.py @@ -60,6 +60,7 @@ def active_scan(handler, protocol=None): known_peers = set([]) for scanned in scan(['urn:dmtf-org:service:redfish-rest:1', 'urn::service:affluent']): for addr in scanned['addresses']: + addr = addr[0:1] + addr[2:] if addr in known_peers: break hwaddr = neighutil.get_hwaddr(addr[0]) @@ -79,13 +80,20 @@ def scan(services, target=None): def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byehandler, machandlers, handler): - if mac in peerbymacaddress and peer not in peerbymacaddress[mac]['addresses']: - peerbymacaddress[mac]['addresses'].append(peer) + if mac in peerbymacaddress: + normpeer = peer[0:1] + peer[2:] + for currpeer in peerbymacaddress[mac]['addresses']: + currnormpeer = currpeer[0:1] + peer[2:] + if currnormpeer == normpeer: + break + else: + peerbymacaddress[mac]['addresses'].append(peer) else: peerdata = { 'hwaddr': mac, 'addresses': [peer], } + targurl = None for headline in rsp[1:]: if not headline: continue @@ -105,13 +113,20 @@ def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byeha if not value.endswith('/redfish/v1/'): return elif header == 'LOCATION': - if not value.endswith('/DeviceDescription.json'): + if '/eth' in value 
and value.endswith('.xml'): + targurl = '/redfish/v1/' + targtype = 'megarac-bmc' + continue # MegaRAC redfish + elif value.endswith('/DeviceDescription.json'): + targurl = '/DeviceDescription.json' + targtype = 'megarac-bmc' + else: return - if handler: - eventlet.spawn_n(check_fish_handler, handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer) + if handler and targurl: + eventlet.spawn_n(check_fish_handler, handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer, targurl, targtype) -def check_fish_handler(handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer): - retdata = check_fish(('/DeviceDescription.json', peerdata)) +def check_fish_handler(handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer, targurl, targtype): + retdata = check_fish((targurl, peerdata, targtype)) if retdata: known_peers.add(peer) newmacs.add(mac) @@ -325,7 +340,7 @@ def _find_service(service, target): host = '[{0}]'.format(host) msg = smsg.format(host, service) if not isinstance(msg, bytes): - msg = msg.encode('utf8') + msg = msg.encode('utf8') net6.sendto(msg, addr[4]) else: net4.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) @@ -413,7 +428,11 @@ def _find_service(service, target): if '/redfish/v1/' not in peerdata[nid].get('urls', ()) and '/redfish/v1' not in peerdata[nid].get('urls', ()): continue if '/DeviceDescription.json' in peerdata[nid]['urls']: - pooltargs.append(('/DeviceDescription.json', peerdata[nid])) + pooltargs.append(('/DeviceDescription.json', peerdata[nid], 'lenovo-xcc')) + else: + for targurl in peerdata[nid]['urls']: + if '/eth' in targurl and targurl.endswith('.xml'): + pooltargs.append(('/redfish/v1/', peerdata[nid], 'megarac-bmc')) # For now, don't interrogate generic redfish bmcs # This is due to a need to deduplicate from some supported SLP # targets (IMM, TSM, others) @@ -428,7 +447,7 @@ def _find_service(service, target): def check_fish(urldata, port=443, verifycallback=None): if not verifycallback: verifycallback = lambda x: True - url, data = urldata + url, data, targtype = urldata try: wc = webclient.SecureHTTPConnection(_get_svrip(data), port, verifycallback=verifycallback, timeout=1.5) peerinfo = wc.grab_json_response(url, headers={'Accept': 'application/json'}) @@ -457,7 +476,7 @@ def check_fish(urldata, port=443, verifycallback=None): peerinfo = wc.grab_json_response('/redfish/v1/') if url == '/redfish/v1/': if 'UUID' in peerinfo: - data['services'] = ['service:redfish-bmc'] + data['services'] = [targtype] data['uuid'] = peerinfo['UUID'].lower() return data return None @@ -476,7 +495,12 @@ def _parse_ssdp(peer, rsp, peerdata): if code == b'200': if nid in peerdata: peerdatum = peerdata[nid] - if peer not in peerdatum['addresses']: + normpeer = peer[0:1] + peer[2:] + for currpeer in peerdatum['addresses']: + currnormpeer = currpeer[0:1] + peer[2:] + if currnormpeer == normpeer: + break + else: peerdatum['addresses'].append(peer) else: peerdatum = { @@ -511,5 +535,7 @@ def _parse_ssdp(peer, rsp, peerdata): if __name__ == '__main__': def printit(rsp): - print(repr(rsp)) + pass # print(repr(rsp)) active_scan(printit) + + From cfb31a0d8dbc94df1e481c0c673d94e563eb1a03 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 5 Aug 2024 10:00:22 -0400 Subject: [PATCH 37/51] Implement XCC3 discovery For XCC3, change to generic redfish onboarding mechanism. Extend the generic mechanism to be more specific in some ways that the XCC3 is pickier about. 
However, it's just reiteration of what should have already have been the case. --- confluent_server/confluent/discovery/core.py | 3 +- .../discovery/handlers/redfishbmc.py | 8 +- .../confluent/discovery/handlers/xcc3.py | 102 ++++++++++++++++++ 3 files changed, 110 insertions(+), 3 deletions(-) create mode 100644 confluent_server/confluent/discovery/handlers/xcc3.py diff --git a/confluent_server/confluent/discovery/core.py b/confluent_server/confluent/discovery/core.py index b734cece..fd302f8b 100644 --- a/confluent_server/confluent/discovery/core.py +++ b/confluent_server/confluent/discovery/core.py @@ -74,6 +74,7 @@ import confluent.discovery.handlers.tsm as tsm import confluent.discovery.handlers.pxe as pxeh import confluent.discovery.handlers.smm as smm import confluent.discovery.handlers.xcc as xcc +import confluent.discovery.handlers.xcc3 as xcc3 import confluent.discovery.handlers.megarac as megarac import confluent.exceptions as exc import confluent.log as log @@ -114,7 +115,7 @@ nodehandlers = { 'service:lenovo-smm': smm, 'service:lenovo-smm2': smm, 'lenovo-xcc': xcc, - 'lenovo-xcc3': xcc, + 'lenovo-xcc3': xcc3, 'megarac-bmc': megarac, 'service:management-hardware.IBM:integrated-management-module2': imm, 'pxe-client': pxeh, diff --git a/confluent_server/confluent/discovery/handlers/redfishbmc.py b/confluent_server/confluent/discovery/handlers/redfishbmc.py index eed401de..97629f36 100644 --- a/confluent_server/confluent/discovery/handlers/redfishbmc.py +++ b/confluent_server/confluent/discovery/handlers/redfishbmc.py @@ -80,6 +80,7 @@ class NodeHandler(generic.NodeHandler): wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert) wc.set_basic_credentials(defuser, defpass) wc.set_header('Content-Type', 'application/json') + wc.set_header('Accept', 'application/json') authmode = 0 if not self.trieddefault: rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') @@ -114,7 +115,7 @@ class NodeHandler(generic.NodeHandler): if status > 400: self.trieddefault = True if status == 401: - wc.set_basic_credentials(self.DEFAULT_USER, self.targpass) + wc.set_basic_credentials(defuser, self.targpass) rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') if status == 200: # Default user still, but targpass self.currpass = self.targpass @@ -236,7 +237,10 @@ class NodeHandler(generic.NodeHandler): break else: wc.set_header('If-Match', '*') - rsp, status = wc.grab_json_response_with_status(actualnics[0], {'IPv4StaticAddresses': [newconfig]}, method='PATCH') + rsp, status = wc.grab_json_response_with_status(actualnics[0], { + 'DHCPv4': {'DHCPEnabled': False}, + 'IPv4StaticAddresses': [newconfig]}, method='PATCH') + elif self.ipaddr.startswith('fe80::'): self.configmanager.set_node_attributes( {nodename: {'hardwaremanagement.manager': self.ipaddr}}) diff --git a/confluent_server/confluent/discovery/handlers/xcc3.py b/confluent_server/confluent/discovery/handlers/xcc3.py new file mode 100644 index 00000000..780de4fc --- /dev/null +++ b/confluent_server/confluent/discovery/handlers/xcc3.py @@ -0,0 +1,102 @@ +# Copyright 2024 Lenovo +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import confluent.discovery.handlers.redfishbmc as redfishbmc +import eventlet.support.greendns +import confluent.util as util + +webclient = eventlet.import_patched('pyghmi.util.webclient') + + + +getaddrinfo = eventlet.support.greendns.getaddrinfo + + +class NodeHandler(redfishbmc.NodeHandler): + + def get_firmware_default_account_info(self): + return ('USERID', 'PASSW0RD') + + def scan(self): + ip, port = self.get_web_port_and_ip() + c = webclient.SecureHTTPConnection(ip, port, + verifycallback=self.validate_cert) + i = c.grab_json_response('/api/providers/logoninfo') + modelname = i.get('items', [{}])[0].get('machine_name', None) + if modelname: + self.info['modelname'] = modelname + for attrname in list(self.info.get('attributes', {})): + val = self.info['attributes'][attrname] + if '-uuid' == attrname[-5:] and len(val) == 32: + val = val.lower() + self.info['attributes'][attrname] = '-'.join([val[:8], val[8:12], val[12:16], val[16:20], val[20:]]) + attrs = self.info.get('attributes', {}) + room = attrs.get('room-id', None) + if room: + self.info['room'] = room + rack = attrs.get('rack-id', None) + if rack: + self.info['rack'] = rack + name = attrs.get('name', None) + if name: + self.info['hostname'] = name + unumber = attrs.get('lowest-u', None) + if unumber: + self.info['u'] = unumber + location = attrs.get('location', None) + if location: + self.info['location'] = location + mtm = attrs.get('enclosure-machinetype-model', None) + if mtm: + self.info['modelnumber'] = mtm.strip() + sn = attrs.get('enclosure-serial-number', None) + if sn: + self.info['serialnumber'] = sn.strip() + if attrs.get('enclosure-form-factor', None) == 'dense-computing': + encuuid = attrs.get('chassis-uuid', None) + if encuuid: + self.info['enclosure.uuid'] = fixuuid(encuuid) + slot = int(attrs.get('slot', 0)) + if slot != 0: + self.info['enclosure.bay'] = slot + + def validate_cert(self, certificate): + fprint = util.get_fingerprint(self.https_cert) + return util.cert_matches(fprint, certificate) + + +def remote_nodecfg(nodename, cfm): + cfg = cfm.get_node_attributes( + nodename, 'hardwaremanagement.manager') + ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get( + 'value', None) + ipaddr = ipaddr.split('/', 1)[0] + ipaddr = getaddrinfo(ipaddr, 0)[0][-1] + if not ipaddr: + raise Exception('Cannot remote configure a system without known ' + 'address') + info = {'addresses': [ipaddr]} + nh = NodeHandler(info, cfm) + nh.config(nodename) + + +if __name__ == '__main__': + import confluent.config.configmanager as cfm + c = cfm.ConfigManager(None) + import sys + info = {'addresses': [[sys.argv[1]]]} + print(repr(info)) + testr = NodeHandler(info, c) + testr.config(sys.argv[2]) + From 30c4d6b863e74cd42d974867c68a45c5873f3ecc Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 5 Aug 2024 11:07:50 -0400 Subject: [PATCH 38/51] Add IPMI enablement to generic Redfish handler If attributes indicate desire for IPMI, try to accomodate. 
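The enablement added below boils down to two Redfish PATCH requests: switch the manager's IPMI network protocol on, then make sure the managed account carries the IPMI account type. A rough sketch with the requests library (the manager and account URLs are placeholders; the handler itself discovers them from the service root, and pins the BMC certificate instead of using verify=False):

    import requests

    def enable_ipmi(bmc, user, password, account_url,
                    netproto_url='/redfish/v1/Managers/1/NetworkProtocol'):
        auth = (user, password)
        hdrs = {'Content-Type': 'application/json', 'If-Match': '*'}
        # 1. Enable IPMI-over-LAN at the manager level.
        requests.patch('https://{0}{1}'.format(bmc, netproto_url),
                       json={'IPMI': {'ProtocolEnabled': True}},
                       headers=hdrs, auth=auth, verify=False).raise_for_status()
        # 2. Allow the account to use IPMI; many BMCs insist on the password
        #    being resupplied whenever AccountTypes changes.
        acct = requests.get('https://{0}{1}'.format(bmc, account_url),
                            auth=auth, verify=False).json()
        actypes = acct.get('AccountTypes', [])
        if 'IPMI' not in actypes:
            actypes.append('IPMI')
            requests.patch('https://{0}{1}'.format(bmc, account_url),
                           json={'AccountTypes': actypes, 'Password': password},
                           headers=hdrs, auth=auth, verify=False).raise_for_status()
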
--- .../discovery/handlers/redfishbmc.py | 102 +++++++++++++----- .../confluent/discovery/handlers/xcc.py | 2 +- .../confluent/discovery/handlers/xcc3.py | 1 + 3 files changed, 76 insertions(+), 29 deletions(-) diff --git a/confluent_server/confluent/discovery/handlers/redfishbmc.py b/confluent_server/confluent/discovery/handlers/redfishbmc.py index 97629f36..d4e164d6 100644 --- a/confluent_server/confluent/discovery/handlers/redfishbmc.py +++ b/confluent_server/confluent/discovery/handlers/redfishbmc.py @@ -57,8 +57,28 @@ class NodeHandler(generic.NodeHandler): self.csrftok = None self.channel = None self.atdefault = True + self._srvroot = None + self._mgrinfo = None super(NodeHandler, self).__init__(info, configmanager) + def srvroot(self, wc): + if not self._srvroot: + srvroot, status = wc.grab_json_response_with_status('/redfish/v1/') + if status == 200: + self._srvroot = srvroot + return self._srvroot + + def mgrinfo(self, wc): + if not self._mgrinfo: + mgrs = self.srvroot(wc)['Managers']['@odata.id'] + rsp = wc.grab_json_response(mgrs) + if len(rsp['Members']) != 1: + raise Exception("Can not handle multiple Managers") + mgrurl = rsp['Members'][0]['@odata.id'] + self._mgrinfo = wc.grab_json_response(mgrurl) + return self._mgrinfo + + def get_firmware_default_account_info(self): raise Exception('This must be subclassed') @@ -75,6 +95,30 @@ class NodeHandler(generic.NodeHandler): fprint = util.get_fingerprint(self.https_cert) return util.cert_matches(fprint, certificate) + def enable_ipmi(self, wc): + npu = self.mgrinfo(wc).get( + 'NetworkProtocol', {}).get('@odata.id', None) + if not npu: + raise Exception('Cannot enable IPMI, no NetworkProtocol on BMC') + npi = wc.grab_json_response(npu) + if not npi.get('IPMI', {}).get('ProtocolEnabled'): + wc.set_header('If-Match', '*') + wc.grab_json_response_with_status( + npu, {'IPMI': {'ProtocolEnabled': True}}, method='PATCH') + acctinfo = wc.grab_json_response_with_status( + self.target_account_url(wc)) + acctinfo = acctinfo[0] + actypes = acctinfo['AccountTypes'] + candidates = acctinfo['AccountTypes@Redfish.AllowableValues'] + if 'IPMI' not in actypes and 'IPMI' in candidates: + actypes.append('IPMI') + acctupd = { + 'AccountTypes': actypes, + 'Password': self.currpass, + } + rsp = wc.grab_json_response_with_status( + self.target_account_url(wc), acctupd, method='PATCH') + def _get_wc(self): defuser, defpass = self.get_firmware_default_account_info() wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert) @@ -144,13 +188,33 @@ class NodeHandler(generic.NodeHandler): self.currpass = self.targpass return wc + def target_account_url(self, wc): + asrv = self.srvroot(wc).get('AccountService', {}).get('@odata.id') + rsp, status = wc.grab_json_response_with_status(asrv) + accts = rsp.get('Accounts', {}).get('@odata.id') + rsp, status = wc.grab_json_response_with_status(accts) + accts = rsp.get('Members', []) + for accturl in accts: + accturl = accturl.get('@odata.id', '') + if accturl: + rsp, status = wc.grab_json_response_with_status(accturl) + if rsp.get('UserName', None) == self.curruser: + targaccturl = accturl + break + else: + raise Exception("Unable to identify Account URL to modify on this BMC") + return targaccturl + def config(self, nodename): + mgrs = None self.nodename = nodename creds = self.configmanager.get_node_attributes( nodename, ['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword', - 'hardwaremanagement.manager', 'hardwaremanagement.method', 'console.method'], - True) + 
'hardwaremanagement.manager', + 'hardwaremanagement.method', + 'console.method'], + True) cd = creds.get(nodename, {}) defuser, defpass = self.get_firmware_default_account_info() user, passwd, _ = self.get_node_credentials( @@ -160,7 +224,6 @@ class NodeHandler(generic.NodeHandler): self.targuser = user self.targpass = passwd wc = self._get_wc() - srvroot, status = wc.grab_json_response_with_status('/redfish/v1/') curruserinfo = {} authupdate = {} wc.set_header('Content-Type', 'application/json') @@ -169,21 +232,7 @@ class NodeHandler(generic.NodeHandler): if passwd != self.currpass: authupdate['Password'] = passwd if authupdate: - targaccturl = None - asrv = srvroot.get('AccountService', {}).get('@odata.id') - rsp, status = wc.grab_json_response_with_status(asrv) - accts = rsp.get('Accounts', {}).get('@odata.id') - rsp, status = wc.grab_json_response_with_status(accts) - accts = rsp.get('Members', []) - for accturl in accts: - accturl = accturl.get('@odata.id', '') - if accturl: - rsp, status = wc.grab_json_response_with_status(accturl) - if rsp.get('UserName', None) == self.curruser: - targaccturl = accturl - break - else: - raise Exception("Unable to identify Account URL to modify on this BMC") + targaccturl = self.target_account_url(wc) rsp, status = wc.grab_json_response_with_status(targaccturl, authupdate, method='PATCH') if status >= 300: raise Exception("Failed attempting to update credentials on BMC") @@ -193,7 +242,11 @@ class NodeHandler(generic.NodeHandler): while tries and status >= 300: tries -= 1 eventlet.sleep(1.0) - _, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + _, status = wc.grab_json_response_with_status( + '/redfish/v1/Managers') + if (cd.get('hardwaremanagement.method', {}).get('value', 'ipmi') != 'redfish' + or cd.get('console.method', {}).get('value', None) == 'ipmi'): + self.enable_ipmi(wc) if ('hardwaremanagement.manager' in cd and cd['hardwaremanagement.manager']['value'] and not cd['hardwaremanagement.manager']['value'].startswith( @@ -204,14 +257,8 @@ class NodeHandler(generic.NodeHandler): newip = newipinfo[-1][0] if ':' in newip: raise exc.NotImplementedException('IPv6 remote config TODO') - mgrs = srvroot['Managers']['@odata.id'] - rsp = wc.grab_json_response(mgrs) - if len(rsp['Members']) != 1: - raise Exception("Can not handle multiple Managers") - mgrurl = rsp['Members'][0]['@odata.id'] - mginfo = wc.grab_json_response(mgrurl) - hifurls = get_host_interface_urls(wc, mginfo) - mgtnicinfo = mginfo['EthernetInterfaces']['@odata.id'] + hifurls = get_host_interface_urls(wc, self.mgrinfo(wc)) + mgtnicinfo = self.mgrinfo(wc)['EthernetInterfaces']['@odata.id'] mgtnicinfo = wc.grab_json_response(mgtnicinfo) mgtnics = [x['@odata.id'] for x in mgtnicinfo.get('Members', [])] actualnics = [] @@ -240,7 +287,6 @@ class NodeHandler(generic.NodeHandler): rsp, status = wc.grab_json_response_with_status(actualnics[0], { 'DHCPv4': {'DHCPEnabled': False}, 'IPv4StaticAddresses': [newconfig]}, method='PATCH') - elif self.ipaddr.startswith('fe80::'): self.configmanager.set_node_attributes( {nodename: {'hardwaremanagement.manager': self.ipaddr}}) diff --git a/confluent_server/confluent/discovery/handlers/xcc.py b/confluent_server/confluent/discovery/handlers/xcc.py index 49fe1e87..d4d67590 100644 --- a/confluent_server/confluent/discovery/handlers/xcc.py +++ b/confluent_server/confluent/discovery/handlers/xcc.py @@ -639,7 +639,7 @@ def remote_nodecfg(nodename, cfm): ipaddr = ipaddr.split('/', 1)[0] ipaddr = getaddrinfo(ipaddr, 0)[0][-1] if not ipaddr: - 
raise Excecption('Cannot remote configure a system without known ' + raise Exception('Cannot remote configure a system without known ' 'address') info = {'addresses': [ipaddr]} nh = NodeHandler(info, cfm) diff --git a/confluent_server/confluent/discovery/handlers/xcc3.py b/confluent_server/confluent/discovery/handlers/xcc3.py index 780de4fc..24974172 100644 --- a/confluent_server/confluent/discovery/handlers/xcc3.py +++ b/confluent_server/confluent/discovery/handlers/xcc3.py @@ -24,6 +24,7 @@ getaddrinfo = eventlet.support.greendns.getaddrinfo class NodeHandler(redfishbmc.NodeHandler): + devname = 'XCC' def get_firmware_default_account_info(self): return ('USERID', 'PASSW0RD') From fc5c1aa90f4551e634db7b063f1db5d945683eb2 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 5 Aug 2024 11:32:57 -0400 Subject: [PATCH 39/51] Fix SSDP error during merge --- confluent_server/confluent/discovery/protocols/ssdp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/confluent_server/confluent/discovery/protocols/ssdp.py b/confluent_server/confluent/discovery/protocols/ssdp.py index 45d1e1f3..e2688d66 100644 --- a/confluent_server/confluent/discovery/protocols/ssdp.py +++ b/confluent_server/confluent/discovery/protocols/ssdp.py @@ -461,7 +461,7 @@ def check_fish(urldata, port=443, verifycallback=None): except KeyError: peerinfo['xcc-variant'] = '3' except IndexError: -+ return None + return None try: myuuid = peerinfo['node-uuid'].lower() if '-' not in myuuid: From 0fd07e842748d60cc67d125bdbf9540adae569bf Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 5 Aug 2024 13:09:50 -0400 Subject: [PATCH 40/51] Fix race condition in SSDP snoop If an asynchronous handler is slow to enroll a target while another target causes an iteration of the snoop loop, the various modified structures had been discarded in the interim. Now persist the data structures iteration to iteration, using 'clear()' to empty them rather than getting brand new data structures each loop. --- .../confluent/discovery/protocols/ssdp.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/confluent_server/confluent/discovery/protocols/ssdp.py b/confluent_server/confluent/discovery/protocols/ssdp.py index e2688d66..c7063838 100644 --- a/confluent_server/confluent/discovery/protocols/ssdp.py +++ b/confluent_server/confluent/discovery/protocols/ssdp.py @@ -116,10 +116,11 @@ def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byeha if '/eth' in value and value.endswith('.xml'): targurl = '/redfish/v1/' targtype = 'megarac-bmc' - continue # MegaRAC redfish + continue # MegaRAC redfish elif value.endswith('/DeviceDescription.json'): targurl = '/DeviceDescription.json' - targtype = 'megarac-bmc' + targtype = 'lenovo-xcc' + continue else: return if handler and targurl: @@ -179,11 +180,14 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None): net4.bind(('', 1900)) net6.bind(('', 1900)) peerbymacaddress = {} + newmacs = set([]) + deferrednotifies = [] + machandlers = {} while True: try: - newmacs = set([]) - deferrednotifies = [] - machandlers = {} + newmacs.clear() + deferrednotifies.clear() + machandlers.clear() r = select.select((net4, net6), (), (), 60) if r: r = r[0] From 0afc3eb03a89b16511a142351c1654cea6ada8a4 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 5 Aug 2024 13:12:54 -0400 Subject: [PATCH 41/51] Port SSDP improvements to SLP It may not apply, but better to be consistent. 
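The rationale is the same as in the SSDP patch above: a handler spawned asynchronously keeps a reference to the shared containers, so they must be emptied in place rather than rebound each iteration. A tiny self-contained illustration (names are illustrative, not the module's):

    newmacs = set()
    captured = newmacs                 # what a spawned handler holds on to

    # Old behaviour: each iteration rebinds a brand new set...
    newmacs = set()
    newmacs.add('aa:bb:cc:dd:ee:ff')
    print('aa:bb:cc:dd:ee:ff' in captured)   # False: the handler's view went stale

    # New behaviour: keep the same object and empty it with clear()
    newmacs = captured
    newmacs.clear()
    newmacs.add('aa:bb:cc:dd:ee:ff')
    print('aa:bb:cc:dd:ee:ff' in captured)   # True: the handler sees live data
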
--- .../confluent/discovery/protocols/slp.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/confluent_server/confluent/discovery/protocols/slp.py b/confluent_server/confluent/discovery/protocols/slp.py index 30acb475..f1e334f3 100644 --- a/confluent_server/confluent/discovery/protocols/slp.py +++ b/confluent_server/confluent/discovery/protocols/slp.py @@ -471,10 +471,13 @@ def snoop(handler, protocol=None): # socket in use can occur when aliased ipv4 are encountered net.bind(('', 427)) net4.bind(('', 427)) - + newmacs = set([]) + known_peers = set([]) + peerbymacaddress = {} + deferpeers = [] while True: try: - newmacs = set([]) + newmacs.clear() r, _, _ = select.select((net, net4), (), (), 60) # clear known_peers and peerbymacaddress # to avoid stale info getting in... @@ -482,9 +485,9 @@ def snoop(handler, protocol=None): # addresses that come close together # calling code needs to understand deeper context, as snoop # will now yield dupe info over time - known_peers = set([]) - peerbymacaddress = {} - deferpeers = [] + known_peers.clear() + peerbymacaddress.clear() + deferpeers.clear() while r and len(deferpeers) < 256: for s in r: (rsp, peer) = s.recvfrom(9000) From e07e6ed152ea1396902199c2cc72993b3ac88706 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 5 Aug 2024 14:56:23 -0400 Subject: [PATCH 42/51] Improve error handling in OpenBMC console --- confluent_server/confluent/plugins/console/openbmc.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/confluent_server/confluent/plugins/console/openbmc.py b/confluent_server/confluent/plugins/console/openbmc.py index 8677ce17..519ca2d4 100644 --- a/confluent_server/confluent/plugins/console/openbmc.py +++ b/confluent_server/confluent/plugins/console/openbmc.py @@ -134,7 +134,12 @@ class TsmConsole(conapi.Console): kv = util.TLSCertVerifier( self.nodeconfig, self.node, 'pubkeys.tls_hardwaremanager').verify_cert wc = webclient.SecureHTTPConnection(self.origbmc, 443, verifycallback=kv) - rsp = wc.grab_json_response_with_status('/login', {'data': [self.username.decode('utf8'), self.password.decode("utf8")]}, headers={'Content-Type': 'application/json'}) + try: + rsp = wc.grab_json_response_with_status('/login', {'data': [self.username.decode('utf8'), self.password.decode("utf8")]}, headers={'Content-Type': 'application/json', 'Accept': 'application/json'}) + except Exception as e: + raise cexc.TargetEndpointUnreachable(str(e)) + if rsp[1] > 400: + raise cexc.TargetEndpointBadCredentials bmc = self.bmc if '%' in self.bmc: prefix = self.bmc.split('%')[0] From 8c1381633116dc389554aa1518fe5adbda33d53a Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Mon, 5 Aug 2024 15:03:00 -0400 Subject: [PATCH 43/51] Fix fetch of model name for XCC3 systems --- confluent_server/confluent/discovery/handlers/xcc3.py | 1 + 1 file changed, 1 insertion(+) diff --git a/confluent_server/confluent/discovery/handlers/xcc3.py b/confluent_server/confluent/discovery/handlers/xcc3.py index 24974172..050186e9 100644 --- a/confluent_server/confluent/discovery/handlers/xcc3.py +++ b/confluent_server/confluent/discovery/handlers/xcc3.py @@ -33,6 +33,7 @@ class NodeHandler(redfishbmc.NodeHandler): ip, port = self.get_web_port_and_ip() c = webclient.SecureHTTPConnection(ip, port, verifycallback=self.validate_cert) + c.set_header('Accept', 'application/json') i = c.grab_json_response('/api/providers/logoninfo') modelname = i.get('items', [{}])[0].get('machine_name', None) if modelname: From 
feaef79060850b9cc3c0d682c3c4737f227f4c28 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Tue, 6 Aug 2024 09:30:13 -0400 Subject: [PATCH 44/51] Successfully track credential currency across change --- confluent_server/confluent/discovery/handlers/redfishbmc.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/confluent_server/confluent/discovery/handlers/redfishbmc.py b/confluent_server/confluent/discovery/handlers/redfishbmc.py index d4e164d6..7cf3f3d1 100644 --- a/confluent_server/confluent/discovery/handlers/redfishbmc.py +++ b/confluent_server/confluent/discovery/handlers/redfishbmc.py @@ -236,6 +236,8 @@ class NodeHandler(generic.NodeHandler): rsp, status = wc.grab_json_response_with_status(targaccturl, authupdate, method='PATCH') if status >= 300: raise Exception("Failed attempting to update credentials on BMC") + self.curruser = user + self.currpass = passwd wc.set_basic_credentials(user, passwd) _, status = wc.grab_json_response_with_status('/redfish/v1/Managers') tries = 10 From 21b1ac7690f301c9ef533868c8aae5e4a53dcf50 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Tue, 6 Aug 2024 09:34:46 -0400 Subject: [PATCH 45/51] Remove asyncore for jammy asyncore isn't needed before noble --- confluent_server/builddeb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/confluent_server/builddeb b/confluent_server/builddeb index 4071b5b1..a63d9d4f 100755 --- a/confluent_server/builddeb +++ b/confluent_server/builddeb @@ -35,6 +35,8 @@ cd deb_dist/!(*.orig)/ if [ "$OPKGNAME" = "confluent-server" ]; then if grep wheezy /etc/os-release; then sed -i 's/^\(Depends:.*\)/\1, python-confluent-client, python-lxml, python-eficompressor, python-pycryptodomex, python-dateutil, python-pyopenssl, python-msgpack/' debian/control + elif grep jammy /etc/os-release; then + sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi, python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd, python3-netifaces, python3-yaml, python3-dateutil/' debian/control else sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi, python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd, python3-netifaces, python3-yaml, python3-dateutil, python3-pyasyncore/' debian/control fi From ef1f51ef988ac296b06ccf20c6ea3078a5c13bfc Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Tue, 6 Aug 2024 10:05:39 -0400 Subject: [PATCH 46/51] Wire in bmc config clear to redfish --- .../confluent/plugins/hardwaremanagement/redfish.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/confluent_server/confluent/plugins/hardwaremanagement/redfish.py b/confluent_server/confluent/plugins/hardwaremanagement/redfish.py index f53cc393..2c2857de 100644 --- a/confluent_server/confluent/plugins/hardwaremanagement/redfish.py +++ b/confluent_server/confluent/plugins/hardwaremanagement/redfish.py @@ -522,6 +522,8 @@ class IpmiHandler(object): return self.handle_sysconfig(True) elif self.element[1:3] == ['system', 'clear']: return self.handle_sysconfigclear() + elif self.element[1:3] == ['management_controller', 'clear']: + return self.handle_bmcconfigclear() elif self.element[1:3] == ['management_controller', 'licenses']: return self.handle_licenses() elif self.element[1:3] == ['management_controller', 'save_licenses']: @@ -1323,6 
+1325,12 @@ class IpmiHandler(object): self.ipmicmd.set_bmc_configuration( self.inputdata.get_attributes(self.node)) + def handle_bmcconfigclear(self): + if 'read' == self.op: + raise exc.InvalidArgumentException( + 'Cannot read the "clear" resource') + self.ipmicmd.clear_bmc_configuration() + def handle_sysconfigclear(self): if 'read' == self.op: raise exc.InvalidArgumentException( From f2b9a4fa5d2bb5c5820c352b7fce641e79323c3d Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Tue, 6 Aug 2024 12:25:21 -0400 Subject: [PATCH 47/51] Improve handling of ssh service being pre-hooked --- imgutil/imgutil | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/imgutil/imgutil b/imgutil/imgutil index bc34af01..5b5de0b2 100644 --- a/imgutil/imgutil +++ b/imgutil/imgutil @@ -661,11 +661,20 @@ class DebHandler(OsHandler): run_constrainedx(fancy_chroot, (args, self.targpath)) args.cmd = ['apt-get', '-y', 'install'] + self.includepkgs run_constrainedx(fancy_chroot, (args, self.targpath)) - servicefile = os.path.join(self.targpath, 'usr/lib/systemd/system/ssh.service') + servicefile = os.path.join( + self.targpath, 'usr/lib/systemd/system/ssh.service') if os.path.exists(servicefile): - os.symlink('/usr/lib/systemd/system/ssh.service', os.path.join(self.targpath, 'etc/systemd/system/multi-user.target.wants/ssh.service')) + targfile = os.path.join( + self.targpath, + 'etc/systemd/system/multi-user.target.wants/ssh.service') + if not os.path.exists(targfile): + os.symlink('/usr/lib/systemd/system/ssh.service', targfile) else: - os.symlink('/usr/lib/systemd/system/sshd.service', os.path.join(self.targpath, 'etc/systemd/system/multi-user.target.wants/sshd.service')) + targfile = os.path.join( + self.targpath, + 'etc/systemd/system/multi-user.target.wants/sshd.service') + if not os.path.exists(targfile): + os.symlink('/usr/lib/systemd/system/sshd.service', targfile) From 7ab76004925ff82cceafaaaac158d71f9ba0f04b Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 7 Aug 2024 07:56:11 -0400 Subject: [PATCH 48/51] Add cpio dependency for imgutil --- imgutil/confluent_imgutil.spec.tmpl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/imgutil/confluent_imgutil.spec.tmpl b/imgutil/confluent_imgutil.spec.tmpl index 35ed4070..f7dea7a7 100644 --- a/imgutil/confluent_imgutil.spec.tmpl +++ b/imgutil/confluent_imgutil.spec.tmpl @@ -8,13 +8,13 @@ Source: confluent_imgutil.tar.xz BuildArch: noarch BuildRoot: /tmp/ %if "%{dist}" == ".el8" -Requires: squashfs-tools +Requires: squashfs-tools cpio %else %if "%{dist}" == ".el9" -Requires: squashfs-tools +Requires: squashfs-tools cpio %else %if "%{dist}" == ".el7" -Requires: squashfs-tools +Requires: squashfs-tools cpio %else Requires: squashfs %endif From 187fda4bb865b0f11c6333f6145e4a02f043527e Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 7 Aug 2024 07:58:08 -0400 Subject: [PATCH 49/51] Add debootstrap dependency for imgutil --- imgutil/control.tmpl | 1 + 1 file changed, 1 insertion(+) diff --git a/imgutil/control.tmpl b/imgutil/control.tmpl index a0fe21af..3bc8644c 100644 --- a/imgutil/control.tmpl +++ b/imgutil/control.tmpl @@ -5,4 +5,5 @@ Priority: optional Maintainer: Jarrod Johnson Description: Web frontend for confluent server Architecture: all +Depends: debootstrap From ca4955101d3ca912bf2c030ba77285212a2e149d Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 7 Aug 2024 08:40:10 -0400 Subject: [PATCH 50/51] Improve "realness" of imgutil exec context Utilities that expected /dev/pts will now be 
satisfied, as a new /dev/pts is mounted. Further, systemd added a check in various utilities that was fouled by the previous method of appearing to have a root filesystem. Before, after chroot, we would bind mount / to itself, and this made things using /proc/mounts, /proc/self/mountinfo, df, mount, etc happy that there is a real looking root filesystem. However, by doing it after the chroot, systemd could statx on '..' and get a different mnt id than /. So it had to be done prior to the chroot. However it also had to be done before other mounts as bind mounting over it would block the submounts. This more closely imitates the initramfs behavior, where '/' starts life as a 'real' filesystem before being mounted up and switched into. This behavior was made to imitate the 'start_root.c' behavior as that seems to be more broadly successful. --- imgutil/imgutil | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/imgutil/imgutil b/imgutil/imgutil index 5b5de0b2..c5446069 100644 --- a/imgutil/imgutil +++ b/imgutil/imgutil @@ -963,7 +963,6 @@ def fancy_chroot(args, installroot): _mount('none', dstresolv, flags=MS_RDONLY|MS_REMOUNT|MS_BIND) os.chroot(installroot) os.chdir('/') - _mount('/', '/', flags=MS_BIND) # Make / manifest as a mounted filesystem in exec os.environ['PS1'] = '[\x1b[1m\x1b[4mIMGUTIL EXEC {0}\x1b[0m \\W]$ '.format(imgname) os.environ['CONFLUENT_IMGUTIL_MODE'] = 'exec' if oshandler: @@ -1004,7 +1003,13 @@ def build_root_backend(optargs): def _mount_constrained_fs(args, installroot): + # This is prepping for a chroot. + # For the target environment to be content with having a root + # filesystem, installroot must be a 'mount' entry of it's own, + # so bind mount to itself to satisfy + _mount(installroot, installroot, flags=MS_BIND) _mount('/dev', os.path.join(installroot, 'dev'), flags=MS_BIND|MS_RDONLY) + _mount('/dev/pts', os.path.join(installroot, 'dev/pts'), flags=MS_BIND|MS_RDONLY) _mount('proc', os.path.join(installroot, 'proc'), fstype='proc') _mount('sys', os.path.join(installroot, 'sys'), fstype='sysfs') _mount('runfs', os.path.join(installroot, 'run'), fstype='tmpfs') From 4453ba3b64bb41e6e37ae204115fdfdf6d4bc296 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 7 Aug 2024 09:20:34 -0400 Subject: [PATCH 51/51] Add cpio to confluent_server In order to do osdeploy processing, we must have cpio --- confluent_server/confluent_server.spec.tmpl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/confluent_server/confluent_server.spec.tmpl b/confluent_server/confluent_server.spec.tmpl index bf81c969..04e63b21 100644 --- a/confluent_server/confluent_server.spec.tmpl +++ b/confluent_server/confluent_server.spec.tmpl @@ -14,13 +14,13 @@ Prefix: %{_prefix} BuildArch: noarch Requires: confluent_vtbufferd %if "%{dist}" == ".el7" -Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic +Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, 
python-dateutil, python-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic cpio %else %if "%{dist}" == ".el8" -Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute +Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute cpio %else %if "%{dist}" == ".el9" -Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute +Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute cpio %else Requires: python3-dbm,python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodome >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dnspython, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-PyYAML openssl iproute %endif
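As an illustration of the mount ordering described in the "Improve 'realness' of imgutil exec context" patch above: the target is bind-mounted onto itself first, the device, proc, sys and run filesystems are layered on top, and only then does the chroot happen. A stripped-down sketch of that ordering (requires root; not the actual _mount helper, which also applies read-only flags to the device bind mounts):

    import os
    import subprocess

    def prep_chroot(installroot):
        run = subprocess.check_call
        # The self bind-mount must precede the submounts, or it would hide
        # them, and must precede the chroot so that '/' appears as a real
        # mount to tools consulting /proc/mounts or statx.
        run(['mount', '--bind', installroot, installroot])
        run(['mount', '--bind', '/dev', os.path.join(installroot, 'dev')])
        run(['mount', '--bind', '/dev/pts', os.path.join(installroot, 'dev/pts')])
        run(['mount', '-t', 'proc', 'proc', os.path.join(installroot, 'proc')])
        run(['mount', '-t', 'sysfs', 'sys', os.path.join(installroot, 'sys')])
        run(['mount', '-t', 'tmpfs', 'runfs', os.path.join(installroot, 'run')])
        os.chroot(installroot)
        os.chdir('/')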