2
0
mirror of https://github.com/xcat2/confluent.git synced 2026-04-09 10:21:39 +00:00

Merge branch 'master' into async

Try to merge in 2025 work into async
This commit is contained in:
Jarrod Johnson
2026-01-20 14:24:01 -05:00
258 changed files with 10906 additions and 1354 deletions

View File

@@ -13,7 +13,7 @@ If you're coming from xCAT, check out [this comparison](https://hpc.lenovo.com/u
# Documentation
Confluent documentation is hosted on hpc.lenovo.com: https://hpc.lenovo.com/users/documentation/
Confluent documentation is hosted on: https://xcat2.github.io/confluent-docs/
# Download

View File

@@ -0,0 +1,8 @@
# Build container for producing el10 RPM packages (see buildpackages.sh).
FROM almalinux:10
# Update, install build dependencies, and clean metadata in ONE layer so the
# package cache does not persist in the image (cleanup in a later layer would
# not shrink it). Package list sorted for diffability.
RUN yum -y update && \
    yum -y install \
        createrepo \
        gcc \
        git \
        golang \
        make \
        perl \
        perl-DBI \
        perl-JSON \
        perl-XML-LibXML \
        pinentry-tty \
        python3 \
        python3-devel \
        python3-setuptools \
        rpm-build \
        rpm-sign && \
    yum clean all
# COPY (not ADD) for plain local files: GPG signing macros and the build driver.
COPY rpmmacro /root/.rpmmacros
COPY buildpackages.sh /bin/
#VOLUME ["/rpms", "/srpms"]
CMD ["/bin/bash","/bin/buildpackages.sh"]

View File

@@ -0,0 +1,6 @@
#!/bin/bash
# Rebuild every source RPM found in /srpms (mounted into the container),
# then collect all resulting binary RPMs into the /rpms output volume.
# NOTE: shebang added for consistency with the el8/el9 variants of this script.
for package in /srpms/*; do
    # Quote the path so filenames with spaces don't split into multiple args.
    rpmbuild --rebuild "$package"
done
# rpmbuild scatters results across per-arch subdirectories; flatten them out.
find ~/rpmbuild/RPMS -type f -exec cp {} /rpms/ \;

3
build/arm/el10/rpmmacro Normal file
View File

@@ -0,0 +1,3 @@
%_gpg_digest_algo sha256
%_gpg_name Lenovo Scalable Infrastructure

8
build/arm/el8/Dockerfile Normal file
View File

@@ -0,0 +1,8 @@
# Build container for producing el8 RPM packages (see buildpackages.sh).
FROM almalinux:8
# Update, install build dependencies, and clean metadata in ONE layer so the
# package cache does not persist in the image. Package list sorted for diffability.
RUN yum -y update && \
    yum -y install \
        createrepo \
        fuse-devel \
        gcc \
        git \
        libcurl-devel \
        make \
        perl \
        perl-DB_File \
        perl-DBI \
        perl-JSON \
        perl-Net-DNS \
        perl-XML-LibXML \
        python3 \
        python3-devel \
        python3-setuptools \
        rpm-build \
        rpm-sign && \
    yum clean all
# COPY (not ADD) for plain local files: GPG signing macros and the build driver.
COPY rpmmacro /root/.rpmmacros
COPY buildpackages.sh /bin/
#VOLUME ["/rpms", "/srpms"]
CMD ["/bin/bash","/bin/buildpackages.sh"]

View File

@@ -0,0 +1,6 @@
#!/bin/bash
# Rebuild every source RPM found in /srpms (mounted into the container),
# then collect all resulting binary RPMs into the /rpms output volume.
for package in /srpms/*; do
    # Quote the path so filenames with spaces don't split into multiple args.
    rpmbuild --rebuild "$package"
done
# rpmbuild scatters results across per-arch subdirectories; flatten them out.
find ~/rpmbuild/RPMS -type f -exec cp {} /rpms/ \;

2
build/arm/el8/rpmmacro Normal file
View File

@@ -0,0 +1,2 @@
%_gpg_digest_algo sha256
%_gpg_name Lenovo Scalable Infrastructure

10
build/arm/el9/Dockerfile Normal file
View File

@@ -0,0 +1,10 @@
# Build container for producing el9 RPM packages (see buildpackages.sh).
FROM almalinux:9
# One logical provisioning step = one layer: update, install base deps
# (including epel-release, which provides the `crb` helper), enable the
# CodeReady Builder repo, install the CRB-only devel packages, then clean
# metadata so the package cache does not persist in the image.
RUN yum -y update && \
    yum -y install \
        createrepo \
        epel-release \
        gcc \
        git \
        make \
        perl \
        perl-DB_File \
        perl-DBI \
        perl-JSON \
        perl-Net-DNS \
        perl-XML-LibXML \
        pinentry-tty \
        python3 \
        python3-devel \
        python3-setuptools \
        rpm-build \
        rpm-sign && \
    crb enable && \
    yum -y install fuse-devel libcurl-devel && \
    yum clean all
# COPY (not ADD) for plain local files: GPG signing macros and the build driver.
COPY rpmmacro /root/.rpmmacros
COPY buildpackages.sh /bin/
#VOLUME ["/rpms", "/srpms"]
CMD ["/bin/bash","/bin/buildpackages.sh"]

View File

@@ -0,0 +1,6 @@
#!/bin/bash
# Rebuild every source RPM found in /srpms (mounted into the container),
# then collect all resulting binary RPMs into the /rpms output volume.
for package in /srpms/*; do
    # Quote the path so filenames with spaces don't split into multiple args.
    rpmbuild --rebuild "$package"
done
# rpmbuild scatters results across per-arch subdirectories; flatten them out.
find ~/rpmbuild/RPMS -type f -exec cp {} /rpms/ \;

2
build/arm/el9/rpmmacro Normal file
View File

@@ -0,0 +1,2 @@
%_gpg_digest_algo sha256
%_gpg_name Lenovo Scalable Infrastructure

View File

@@ -0,0 +1,12 @@
# Build container for producing Debian packages and a reprepro apt repo
# (see buildapt.sh). stdeb.patch fixes python3-stdeb for newer configparser.
FROM ubuntu:noble
COPY stdeb.patch /tmp/
COPY buildapt.sh /bin/
COPY distributions.tmpl /bin/
# Combine update + install in ONE layer (a standalone `apt-get update` layer
# can be served stale from cache) and remove the apt lists to keep the image
# small. Package list sorted for diffability.
RUN apt-get update && \
    apt-get install -y \
        alien \
        debhelper \
        devscripts \
        dh-python \
        gcc \
        git \
        gnupg-agent \
        libdbi-perl \
        libjson-perl \
        libsoap-lite-perl \
        make \
        python3-dnspython \
        python3-eventlet \
        python3-netifaces \
        python3-paramiko \
        python3-pyparsing \
        python3-stdeb \
        quilt \
        reprepro \
        ronn && \
    rm -rf /var/lib/apt/lists/*
# Working directories for sources, built debs, and the published repo.
RUN mkdir -p /sources/git/ /debs/ /apt/
# Apply the stdeb compatibility patch (paths in the patch are rooted at /).
RUN ["bash", "-c", "patch -p1 < /tmp/stdeb.patch"]
CMD ["/bin/bash", "/bin/buildapt.sh"]

View File

@@ -0,0 +1,21 @@
#!/bin/bash
# Build every debian package under /sources/git (each has its own builddeb
# script), add prebuilt artifacts, and publish everything into the reprepro
# repository under /apt.
#cp -a /sources/git /tmp
for builder in $(find /sources/git -name builddeb); do
    cd "$(dirname "$builder")" || continue
    ./builddeb /debs/
done
cp /prebuilt/* /debs/
cp /osd/*.deb /debs/
mkdir -p /apt/conf/
# Derive the distribution codename from os-release; fall back to the
# parenthesized name inside VERSION= when VERSION_CODENAME is absent.
CODENAME=$(grep VERSION_CODENAME= /etc/os-release | sed -e 's/.*=//')
if [ -z "$CODENAME" ]; then
    CODENAME=$(grep VERSION= /etc/os-release | sed -e 's/.*(//' -e 's/).*//')
fi
# -q: we only need the exit status; 2>/dev/null silences the error on the
# very first run when /apt/conf/distributions does not exist yet (>> creates it).
if ! grep -q "$CODENAME" /apt/conf/distributions 2>/dev/null; then
    sed -e "s/#CODENAME#/$CODENAME/" /bin/distributions.tmpl >> /apt/conf/distributions
fi
cd /apt/ || exit 1
reprepro includedeb "$CODENAME" /debs/*.deb
for dsc in /debs/*.dsc; do
    reprepro includedsc "$CODENAME" "$dsc"
done

View File

@@ -0,0 +1,7 @@
Origin: Lenovo HPC Packages
Label: Lenovo HPC Packages
Codename: #CODENAME#
Architectures: amd64 source
Components: main
Description: Lenovo HPC Packages

View File

@@ -0,0 +1,34 @@
diff -urN t/usr/lib/python3/dist-packages/stdeb/cli_runner.py t.patch/usr/lib/python3/dist-packages/stdeb/cli_runner.py
--- t/usr/lib/python3/dist-packages/stdeb/cli_runner.py 2024-06-11 18:30:13.930328999 +0000
+++ t.patch/usr/lib/python3/dist-packages/stdeb/cli_runner.py 2024-06-11 18:32:05.392731405 +0000
@@ -8,7 +8,7 @@
from ConfigParser import SafeConfigParser # noqa: F401
except ImportError:
# python 3.x
- from configparser import SafeConfigParser # noqa: F401
+ from configparser import ConfigParser # noqa: F401
from distutils.util import strtobool
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from stdeb.util import stdeb_cmdline_opts, stdeb_cmd_bool_opts
diff -urN t/usr/lib/python3/dist-packages/stdeb/util.py t.patch/usr/lib/python3/dist-packages/stdeb/util.py
--- t/usr/lib/python3/dist-packages/stdeb/util.py 2024-06-11 18:32:53.864776149 +0000
+++ t.patch/usr/lib/python3/dist-packages/stdeb/util.py 2024-06-11 18:33:02.063952870 +0000
@@ -730,7 +730,7 @@
example.
"""
- cfg = ConfigParser.SafeConfigParser()
+ cfg = ConfigParser.ConfigParser()
cfg.read(cfg_files)
if cfg.has_section(module_name):
section_items = cfg.items(module_name)
@@ -801,7 +801,7 @@
if len(cfg_files):
check_cfg_files(cfg_files, module_name)
- cfg = ConfigParser.SafeConfigParser(cfg_defaults)
+ cfg = ConfigParser.ConfigParser(cfg_defaults)
for cfg_file in cfg_files:
with codecs.open(cfg_file, mode='r', encoding='utf-8') as fd:
cfg.readfp(fd)

9
build/buildarm Normal file
View File

@@ -0,0 +1,9 @@
#!/bin/bash
# Refresh the confluent tree and rebuild the aarch64 osdeploy RPMs, then
# build vtbufferd inside the el9 build container.
cd ~/confluent || exit 1
git pull
# -f: tolerate a clean tree where the osdeploy globs match nothing
# (a bare rm on an unexpanded glob would report an error).
rm -f ~/rpmbuild/RPMS/noarch/*osdeploy*
rm -f ~/rpmbuild/SRPMS/*osdeploy*
sh confluent_osdeploy/buildrpm-aarch64
mkdir -p "$HOME/el9/" "$HOME/el10/"
# Bind-mount $HOME so the container drops its RPMs into ~/el9.
podman run --rm -it -v "$HOME:/build" el9build bash /build/confluent/confluent_vtbufferd/buildrpm /build/el9/

View File

@@ -14,11 +14,16 @@ import shutil
shutil.copyfile('doc/man/nodeattrib.ronn.tmpl', 'doc/man/nodeattrib.ronn')
shutil.copyfile('doc/man/nodegroupattrib.ronn.tmpl', 'doc/man/nodegroupattrib.ronn')
with open('doc/man/nodeattrib.ronn', 'a') as outf:
for field in sorted(attr.node):
outf.write('\n* `{0}`:\n {1}\n'.format(field, attr.node[field]['description']))
with open('doc/man/nodegroupattrib.ronn', 'a') as outf:
for field in sorted(attr.node):
outf.write('\n* `{0}`:\n {1}\n'.format(field, attr.node[field]['description']))
def append_attributes(filename):
with open(filename, 'a') as outf:
for field in sorted(attr.node):
outf.write('\n* `{0}`:\n {1}\n'.format(field, attr.node[field]['description']))
# Optionally write valid values if they exist
for key, values in attr.node[field].items():
if key.startswith('valid'):
values_formatted = ', '.join("'{0}'".format(v) for v in values)
outf.write(f'\n Valid values: {values_formatted}\n')
append_attributes('doc/man/nodeattrib.ronn')
append_attributes('doc/man/nodegroupattrib.ronn')

View File

@@ -45,6 +45,7 @@ import math
import getpass
import optparse
import os
import re
import select
import shlex
import signal
@@ -663,17 +664,18 @@ def quitconfetty(code=0, fullexit=False, fixterm=True):
def get_session_node(shellargs):
# straight to node console
if len(shellargs) == 1 and ' ' not in shellargs[0]:
return shellargs[0]
targ = "/nodes/%s/console/session" % shellargs[0]
return targ, shellargs[0]
if len(shellargs) == 2 and shellargs[0] == 'start':
args = [s for s in shellargs[1].split('/') if s]
if len(args) == 4 and args[0] == 'nodes':
if args[2] == 'console' and \
args[3] == 'session':
return args[1]
if args[2] == 'shell' and \
return shellargs[1], args[1]
if len(args) == 5 and args[0] == 'nodes' and args[2] == 'shell' and \
args[3] == 'sessions':
return args[1]
return None
return shellargs[1], args[1]
return None, None
def run_inline_command(path, arg, completion, **kwargs):
@@ -930,10 +932,10 @@ def main():
doexit = False
inconsole = False
pendingcommand = ""
session_node = get_session_node(shellargs)
targ, session_node = get_session_node(shellargs)
if session_node is not None:
consoleonly = True
do_command(shellargs, netserver)
do_command("start %s" % targ, netserver)
doexit = True
elif shellargs:
do_command(shellargs, netserver)
@@ -978,8 +980,15 @@ def main():
sys.stdout.write('Lost connection to server')
quitconfetty(fullexit=True)
sgr_re = re.compile(r'(\x1b\[[0-9;]*m)')
sgr_parameters_re = re.compile(r'\x1b\[([0-9;]*)m')
fgcolor = None
bgcolor = None
fgshifted = False
pendseq = ''
def consume_termdata(fh, bufferonly=False):
global clearpowermessage
global fgcolor, bgcolor, fgshifted, pendseq
try:
data = tlvdata.recv(fh)
except Exception:
@@ -988,7 +997,59 @@ def consume_termdata(fh, bufferonly=False):
updatestatus(data)
return ''
if data is not None:
data = client.stringify(data)
indata = pendseq + client.stringify(data)
pendseq = ''
data = ''
for segment in sgr_re.split(indata):
if sgr_re.match(segment): # it is an sgr, analyze, maybe replace
params = []
for parameters in sgr_parameters_re.findall(segment):
for param in parameters.split(';'):
params.append(param)
if param == '0':
fgcolor = None
bgcolor = None
try:
ival = int(param)
except ValueError:
continue
if 40 <= ival <= 47 or 100 <= ival <= 107:
bgcolor = ival
if 30 <= ival <= 37 or 90 <= ival <= 97:
fgcolor = ival
if bgcolor is not None:
fgindicated = False
for idx, param in enumerate(params):
try:
ival = int(param)
except ValueError:
continue
if 30 <= ival <= 37 and (bgcolor % 10 == ival % 10):
fgindicated = True
fgshifted = True
ival += 60
params[idx] = str(ival)
if not fgindicated and fgcolor is not None:
if bgcolor and (bgcolor % 10) == (fgcolor % 10):
fgshifted = True
params.append(str((fgcolor % 10) + 90))
elif fgshifted:
params.append(str(fgcolor))
segment = '\x1b[' + ';'.join(str(p) for p in params) + 'm'
data += segment
# defer any partial ansi escape sequence for a later pass
escidx = segment.rfind('\x1b[')
if escidx >= 0:
for chr in segment[escidx + 2:]:
if 0x40 <= ord(chr) <= 0x7e:
break
else:
# incomplete escape sequence, don't print it yet
data = data[:-len(segment) + escidx]
pendseq = segment[escidx:]
if not pendseq and segment and segment[-1] == '\x1b':
data = data[:-1]
pendseq = '\x1b'
if clearpowermessage:
sys.stdout.write("\x1b[2J\x1b[;H")
clearpowermessage = False

View File

@@ -1,8 +1,9 @@
#!/usr/bin/python2
#!/usr/bin/python3
import optparse
import signal
import sys
import os
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
@@ -17,18 +18,16 @@ import confluent.client as client
import confluent.sortutil as sortutil
def lookupdata(data, key):
ret = data.get(key, {}).get('value', '')
if ret is None:
ret = ''
return ret
def main():
argparser = optparse.OptionParser(
usage='''\n %prog noderange -o ansible.hosts
\n ''')
usage='''\n %prog noderange -o ansible.hosts -a
''')
argparser.add_option('-o', '--output',
help='Ansible hosts file')
help='Writes an Ansible hosts file')
argparser.add_option('-a', '--append', action='store_true',
help='Appends to existing hosts file')
(options, args) = argparser.parse_args()
try:
noderange = args[0]
@@ -45,24 +44,42 @@ def main():
if node not in databynode:
databynode[node] = {}
databynode[node].update(res['databynode'][node])
nodesbygroup = {}
with open(options.output, 'w') as importfile:
needempty = False
for node in sortutil.natural_sort(databynode):
data = databynode[node]
if not data.get('groups', []):
importfile.write(node + '\n')
needempty = True
for g in data.get('groups', []):
if g not in nodesbygroup:
nodesbygroup[g] = set([node])
for node in sortutil.natural_sort(databynode):
data = databynode[node]
groups = data.get('groups', [])
if not groups:
nodesbygroup.setdefault('', set()).add(node.strip().lower())
else:
for g in groups:
nodesbygroup.setdefault(g, set()).add(node.strip().lower())
existing_data = {}
if options.append and os.path.exists(options.output):
current_group = ''
with open(options.output, 'r') as f:
for line in f:
line = line.strip().lower()
if not line:
continue
if line.startswith('[') and line.endswith(']'):
current_group = line[1:-1]
existing_data.setdefault(current_group, set())
else:
nodesbygroup[g].add(node)
if needempty:
importfile.write('\n')
for group in sortutil.natural_sort(nodesbygroup):
importfile.write('[{0}]\n'.format(group))
for node in sortutil.natural_sort(nodesbygroup[group]):
existing_data.setdefault(current_group, set()).add(line)
for group, nodes in nodesbygroup.items():
nodes = {n.strip().lower() for n in nodes}
current_nodes = existing_data.get(group, set())
new_nodes = nodes - current_nodes
if new_nodes:
existing_data.setdefault(group, set()).update(nodes)
with open(options.output, 'w') as importfile:
for group in sortutil.natural_sort(existing_data.keys()):
if group:
importfile.write('[{0}]\n'.format(group))
for node in sortutil.natural_sort(existing_data[group]):
importfile.write('{0}\n'.format(node))
importfile.write('\n')

View File

@@ -118,6 +118,7 @@ def main():
ap.add_argument('-a', '--attrib', help='Pull ip addresses and hostnames from attribute database', action='store_true')
ap.add_argument('-i', '--ip', help='Expression to generate addresses (e.g. 172.16.1.{n1} or fd2b:246f:8a50::{n1:x})')
ap.add_argument('-n', '--name', help='Expression for name to add ({node}-compute, etc). If unspecified, "{node} {node}.{dns.domain}" will be used', action='append')
ap.add_argument('-f', '--fqdn-first', help='Put the FQDN first in the hosts entries', action='store_true')
args = ap.parse_args()
c = client.Command()
if args.name:
@@ -173,7 +174,13 @@ def main():
break
else:
for name in list(names):
names.append('{0}.{1}'.format(name, mydomain))
fqdn = '{0}.{1}'.format(name, mydomain)
if args.fqdn_first:
# Insert FQDN at the beginning if --fqdn-first flag is set
names.insert(0, fqdn)
else:
# Otherwise, append FQDN at the end (original behavior)
names.append(fqdn)
names = ' '.join(names)
merger.add_entry(ipdb[node][currnet], names)
merger.write_out('/etc/whatnowhosts')

View File

@@ -8,30 +8,35 @@ import os
import subprocess
import sys
def create_image(directory, image, label=None):
ents = 0
datasz = 512
for dir in os.walk(sys.argv[1]):
ents += 1
for filen in dir[2]:
def create_image(directory, image, label=None, esize=0, totalsize=None):
if totalsize:
datasz = totalsize * 1048576
else:
ents = 0
datasz = 512 + (esize * 1048576)
for dir in os.walk(sys.argv[1]):
ents += 1
filename = os.path.join(dir[0], filen)
currsz = os.path.getsize(filename)
# assuming up to 65k cluster
currsz = (currsz // 512 +1) * 512
datasz += currsz
datasz += ents * 32768
datasz = datasz // 16384 + 1
for filen in dir[2]:
ents += 1
filename = os.path.join(dir[0], filen)
currsz = os.path.getsize(filename)
# assuming up to 65k cluster
currsz = (currsz // 512 + 1) * 512
datasz += currsz
datasz += ents * 32768
datasz = datasz // 65536 + 1
with open(image, 'wb') as imgfile:
imgfile.seek(datasz * 16384 - 1)
imgfile.seek(datasz * 65536 - 1)
imgfile.write(b'\x00')
if label:
# 4 heads, 32 sectors, means 65k per track
subprocess.check_call(['mformat', '-i', image, '-v', label,
'-r', '16', '-d', '1', '-t', str(datasz),
'-s', '16','-h', '2', '::'])
'-s', '32','-h', '4', '::'])
else:
subprocess.check_call(['mformat', '-i', image, '-r', '16', '-d', '1', '-t',
str(datasz), '-s', '16','-h', '2', '::'])
str(datasz), '-s', '32','-h', '4', '::'])
# Some clustered filesystems will have the lock from mformat
# linger after close (mformat doesn't unlock)
# do a blocking wait for shared lock and then explicitly
@@ -56,6 +61,21 @@ if __name__ == '__main__':
sys.argv[0]))
sys.exit(1)
label = None
if len(sys.argv) > 3:
label = sys.argv[3]
create_image(sys.argv[1], sys.argv[2], label)
args = sys.argv
esize = 0
try:
earg = args.index('-e')
esize = int(args[earg + 1])
args = args[:earg] + args[earg +2:]
except ValueError:
pass
totsize = None
try:
earg = args.index('-s')
totsize = int(args[earg + 1])
args = args[:earg] + args[earg +2:]
except ValueError:
pass
if len(args) > 3:
label = args[3]
create_image(args[1], args[2], label, esize, totsize)

View File

@@ -68,6 +68,7 @@ def run():
currprocs = 0
all = set([])
poller = select.epoll()
pipedesc = {}
pendingexecs = deque()
exitcode = 0
@@ -102,19 +103,23 @@ def run():
cmdv = ['ssh', sshnode] + cmdvbase + cmdstorun[0]
if currprocs < concurrentprocs:
currprocs += 1
run_cmdv(sshnode, cmdv, all, pipedesc)
run_cmdv(sshnode, cmdv, all, poller, pipedesc)
else:
pendingexecs.append((sshnode, cmdv))
if not all or exitcode:
sys.exit(exitcode)
rdy, _, _ = select.select(all, [], [], 10)
rdy = poller.poll(10)
while all:
pernodeout = {}
for r in rdy:
r = r[0]
desc = pipedesc[r]
r = desc['file']
node = desc['node']
data = True
while data and select.select([r], [], [], 0)[0]:
singlepoller = select.epoll()
singlepoller.register(r, select.EPOLLIN)
while data and singlepoller.poll(0):
data = r.readline()
if data:
if desc['type'] == 'stdout':
@@ -131,15 +136,17 @@ def run():
if ret is not None:
exitcode = exitcode | ret
all.discard(r)
poller.unregister(r)
r.close()
if desc['type'] == 'stdout':
if idxbynode[node] < len(cmdstorun):
cmdv = ['ssh', sshnode] + cmdvbase + cmdstorun[idxbynode[node]]
idxbynode[node] += 1
run_cmdv(node, cmdv, all, pipedesc)
run_cmdv(node, cmdv, all, poller, pipedesc)
elif pendingexecs:
node, cmdv = pendingexecs.popleft()
run_cmdv(node, cmdv, all, pipedesc)
run_cmdv(node, cmdv, all, poller, pipedesc)
singlepoller.close()
for node in sortutil.natural_sort(pernodeout):
for line in pernodeout[node]:
line = client.stringify(line)
@@ -147,19 +154,21 @@ def run():
sys.stdout.write('{0}: {1}'.format(node, line))
sys.stdout.flush()
if all:
rdy, _, _ = select.select(all, [], [], 10)
rdy = poller.poll(10)
sys.exit(exitcode)
def run_cmdv(node, cmdv, all, pipedesc):
def run_cmdv(node, cmdv, all, poller, pipedesc):
nopen = subprocess.Popen(
cmdv, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pipedesc[nopen.stdout] = {'node': node, 'popen': nopen,
'type': 'stdout'}
pipedesc[nopen.stderr] = {'node': node, 'popen': nopen,
'type': 'stderr'}
pipedesc[nopen.stdout.fileno()] = {'node': node, 'popen': nopen,
'type': 'stdout', 'file': nopen.stdout}
pipedesc[nopen.stderr.fileno()] = {'node': node, 'popen': nopen,
'type': 'stderr', 'file': nopen.stderr}
all.add(nopen.stdout)
poller.register(nopen.stdout, select.EPOLLIN)
all.add(nopen.stderr)
poller.register(nopen.stderr, select.EPOLLIN)
if __name__ == '__main__':

View File

@@ -0,0 +1,104 @@
#!/usr/bin/python3
import os
import sys
from cryptography import x509
from cryptography.hazmat.primitives import hashes
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
sys.path.append(path)
import confluent.client as client
def removebmccacert(noderange, certid, cmd):
for res in cmd.delete(f'/noderange/{noderange}/configuration/management_controller/certificate_authorities/{certid}'):
print(repr(res))
def listbmccacerts(noderange, cmd):
certids = []
for res in cmd.read(f'/noderange/{noderange}/configuration/management_controller/certificate_authorities'):
certids.append(res.get('item', {}).get('href', ''))
for certid in certids:
for res in cmd.read(f'/noderange/{noderange}/configuration/management_controller/certificate_authorities/{certid}'):
for node in res.get('databynode', {}):
certdata = res['databynode'][node].get('pem', {}).get('value', '')
summary = ''
if not certdata:
continue
san = res['databynode'][node].get('san', {}).get('value', '')
if san:
summary += f" SANs: {san}"
subject = res['databynode'][node].get('subject', {}).get('value', '')
if subject:
summary = subject
try:
cert = x509.load_pem_x509_certificate(certdata.encode())
sha256 = cert.fingerprint(hashes.SHA256()).hex().upper()
except Exception as e:
print(f"Error processing certificate for {node}: {e}", file=sys.stderr)
continue
summary += f" (SHA256={sha256})"
print(f"{node}: {certid}: {summary}")
def installbmccacert(noderange, certfile, cmd):
if certfile:
try:
with open(certfile, 'r') as f:
certdata = f.read()
except Exception as e:
print(f"Error reading certificate file: {e}", file=sys.stderr)
sys.exit(1)
# Simple validation: check if it starts and ends with the correct PEM markers
if not (certdata.startswith("-----BEGIN CERTIFICATE-----") and certdata.strip().endswith("-----END CERTIFICATE-----")):
print("Invalid certificate format. Must be a PEM encoded certificate.", file=sys.stderr)
sys.exit(1)
payload = {'pem': certdata}
for res in cmd.update(f'/noderange/{noderange}/configuration/management_controller/certificate_authorities', payload):
print(repr(res))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Node certificate utility')
parser.add_argument('noderange', help='Node range to operate on')
subparsers = parser.add_subparsers(dest='command', help='Available commands')
# installbmccacert subcommand
install_parser = subparsers.add_parser('installbmccacert', help='Install BMC CA certificate')
install_parser.add_argument('filename', help='Certificate file to install')
remove_parser = subparsers.add_parser('removebmccacert', help='Remove BMC CA certificate')
remove_parser.add_argument('id', help='Certificate id to remove')
list_parser = subparsers.add_parser('listbmccacerts', help='List BMC CA certificates')
sign_bmc_parser = subparsers.add_parser('signbmccert', help='Sign BMC certificate')
sign_bmc_parser.add_argument('--days', type=int, help='Number of days the certificate is valid for')
sign_bmc_parser.add_argument('--added-names', type=str, help='Additional names to include in the certificate')
args = parser.parse_args()
c = client.Command()
if args.command == 'installbmccacert':
installbmccacert(args.noderange, args.filename, c)
elif args.command == 'removebmccacert':
removebmccacert(args.noderange, args.id, c)
elif args.command == 'listbmccacerts':
listbmccacerts(args.noderange, c)
elif args.command == 'signbmccert':
payload = {}
if args.days is not None:
payload['days'] = args.days
else:
print("Error: --days is required for signbmccert", file=sys.stderr)
sys.exit(1)
if args.added_names:
payload['added_names'] = args.added_names
for res in c.update(f'/noderange/{args.noderange}/configuration/management_controller/certificate/sign', payload):
print(repr(res))
else:
parser.print_help()
sys.exit(1)

View File

@@ -1,7 +1,7 @@
#!/usr/bin/python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
# Copyright 2025 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -96,6 +96,12 @@ cfgpaths = {
'bmc.static_ipv6_gateway': (
'configuration/management_controller/net_interfaces/management',
'static_v6_gateway'),
'bmc.vlan_id': (
'configuration/management_controller/net_interfaces/management',
'vlan_id'),
'bmc.mac_address': (
'configuration/management_controller/net_interfaces/management',
'hw_addr'),
'bmc.hostname': (
'configuration/management_controller/hostname', 'hostname'),
}

View File

@@ -15,6 +15,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import optparse
import os
import subprocess
@@ -28,8 +29,31 @@ import confluent.client as client
import confluent.sortutil as sortutil
import confluent.logreader as logreader
import time
import select
import signal
import socket
import re
import tty
import termios
import fcntl
import confluent.screensqueeze as sq
try:
from PIL import Image, ImageDraw, ImageFont
except ImportError:
Image = None
try:
# sixel is optional, attempt to import but stub out if unavailable
import io
import sixel
class DumbWriter(sixel.SixelWriter):
def restore_position(self, output):
return
except ImportError:
class DumbWriter():
def draw(self, imgfile):
sys.stderr.write("PySixel not detected, Sixel format display not supported\n")
confettypath = os.path.join(os.path.dirname(sys.argv[0]), 'confetty')
argparser = optparse.OptionParser(
@@ -46,6 +70,11 @@ argparser.add_option('-l', '--log', action='store_true', default=False,
argparser.add_option('-T', '--Timestamp', action='store_true', default=False,
help= 'Dump log in stdout with timestamps')
argparser.add_option('-s', '--screenshot', action='store_true', default=False,
help='Attempt to grab screenshot and render using kitty image protocol')
argparser.add_option('-i', '--interval', type='float',
help='Interval in seconds to redraw the screenshot. Currently only '
'works for one node')
argparser.add_option('-w','--windowed', action='store_true', default=False,
help='Open terminal windows for each node. The '
'environment variable NODECONSOLE_WINDOWED_COMMAND '
@@ -69,6 +98,234 @@ argparser.add_option('-w','--windowed', action='store_true', default=False,
(options, args) = argparser.parse_args()
oldtcattr = None
oldfl = None
def get_coords():
sys.stdout.write('\x1b[6n') #
sys.stdout.flush()
gotreply = select.select([sys.stdin,], [], [], 0.250)[0]
if gotreply:
response = ''
while select.select([sys.stdin,], [], [], 0.1)[0] and 'R' not in response:
response += sys.stdin.read()
coords = response.replace('R', '').split('[')[1].split(';')
#sys.stdout.write('\x1b[{}:{}H'.format(*coords))
def direct_console():
global oldtcattr
global oldfl
oldtcattr = termios.tcgetattr(sys.stdin.fileno())
oldfl = fcntl.fcntl(sys.stdin.fileno(), fcntl.F_GETFL)
tty.setraw(sys.stdin.fileno())
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, oldfl | os.O_NONBLOCK)
def indirect_console():
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, oldfl & ~os.O_NONBLOCK)
termios.tcsetattr(sys.stdin.fileno(), termios.TCSANOW, oldtcattr)
def determine_tile_size(numnodes):
# for now, smash everything to a common aspect ratio. 16:11
# is pretty much wrong for everything, making 4:3 a bit too wide
# and 16:9 significantly too narrow, but it is serviceable
# An improvement could come with us owning the scaling
# instead of delegating to Kitty, which says if we specify both,
# we get stretching. In theory we should be able to get aspect correct
# from kitty by omitting, but:
# then we don't know how much to move the cursor left after draw_image
# Konsole won't scale at all with only partial scaling specified
cheight, cwidth, pixwidth, pixheight = sq.get_screengeom()
# 16:12 is to roughly account for the 'titles' of the tiles
ratio = (pixwidth / 16) / (pixheight / 12)
bestdeviation = None
bestdims = []
for i in range(1, numnodes + 1):
number = numnodes
while number % i != 0:
number += 1
columns = i
rows = number // i
deviation = abs(ratio - (columns / rows))
if bestdeviation is None:
bestdeviation = deviation
bestdims = [columns, rows]
elif deviation < bestdeviation:
bestdeviation = deviation
bestdims = [columns, rows]
# ok, the above algorithm can still pick things like
# 1 2 3
# 4
# So we will let it pick the number of rows, and
# then see if we can chop columns and still fit
while (bestdims[0] - 1) * bestdims[1] >= numnodes:
bestdims[0] = bestdims[0] - 1
cellswide = cwidth // bestdims[0]
cellshigh = cheight // bestdims[1]
tilewidth = cellswide * pixwidth / cwidth
tileheight = cellshigh * pixheight / cheight
if tilewidth > (tileheight * 16 / 11):
tilewidth = tileheight * 16 / 11
cellswide = int(tilewidth // (pixwidth / cwidth))
if tileheight > (tilewidth * 11 /16):
tileheight = tilewidth * 11 / 16
cellshigh = int(tileheight // (pixheight / cheight))
bestdims = bestdims + [cellswide, cellshigh, cellshigh * bestdims[1]]
# incur any scrolling we might get. This allows us to accurately
# save/restore cursor or even get coordinates without scrolling fouling
# the desired target
sys.stdout.write('\n' * bestdims[4])
sys.stdout.flush()
cursor_up(bestdims[4])
return bestdims
cursor_saved = False
def sticky_cursor():
global cursor_saved
# get cursor restore_position
if sys.stdin.isatty() and not cursor_saved:
try:
direct_console()
sys.stdout.write('\x1b7')
cursor_saved = True
finally:
indirect_console()
elif cursor_saved:
try:
direct_console()
sys.stdout.write('\x1b8')
finally:
indirect_console()
def cursor_up(count=1):
sys.stdout.write(f'\x1b[{count}A')
def cursor_down(count=1):
sys.stdout.write(f'\x1b[{count}B')
def cursor_right(count=1):
sys.stdout.write(f'\x1b[{count}C')
def cursor_left(count=1):
sys.stdout.write(f'\x1b[{count}D')
def cursor_save():
sys.stdout.write('\x1b7')
def cursor_restore():
sys.stdout.write('\x1b8')
def cursor_hide():
sys.stdout.write('\x1b[?25l')
def cursor_show():
sys.stdout.write('\x1b[?25h')
def get_pix_dimensions(width, height):
cheight, cwidth, pixwidth, pixheight = sq.get_screengeom()
imgwidth = int(pixwidth / cwidth * width)
imgheight = int(pixheight / cheight * height)
return imgwidth, imgheight
def draw_text(text, width, height):
if Image:
maxfntsize = 256
imgwidth, imgheight = get_pix_dimensions(width, height)
nerr = Image.new(mode='RGB', size=(imgwidth, imgheight), color='green')
nd = ImageDraw.Draw(nerr)
for txtpiece in text.split('\n'):
fntsize = 8
txtfont = ImageFont.truetype('DejaVuSans.ttf', size=fntsize)
while nd.textlength(txtpiece, font=txtfont) < int(imgwidth * 0.90):
fntsize += 1
txtfont = ImageFont.truetype('DejaVuSans.ttf', size=fntsize)
fntsize -= 1
if fntsize < maxfntsize:
maxfntsize = fntsize
hmargin = int(imgwidth * 0.05)
vmargin = int(imgheight * 0.10)
nd.text((hmargin, vmargin), text, font=txtfont)
nd.rectangle((0, 0, nerr.width - 1, nerr.height -1), outline='white')
outfile = io.BytesIO()
nerr.save(outfile, format='PNG')
data = base64.b64encode(outfile.getbuffer())
draw_image(data, width, height, doscale=False)
else:
sys.stdout.write(text)
cursor_left(len(text))
def draw_image(data, width, height, doscale=True):
imageformat = os.environ.get('CONFLUENT_IMAGE_PROTOCOL', 'kitty')
if doscale and Image and width:
bindata = base64.b64decode(data)
binfile = io.BytesIO()
binfile.write(bindata)
binfile.seek(0)
try:
img = Image.open(binfile)
except Exception as e:
errstr = 'Error rendering image:\n' + str(e)
return draw_text(errstr, width, height)
imgwidth, imgheight = get_pix_dimensions(width, height)
nimg = Image.new(mode='RGBA', size=(imgwidth, imgheight))
imgwidth -= 4
imgheight -= 4
hscalefact = imgwidth / img.width
vscalefact = imgheight / img.height
if hscalefact < vscalefact:
rzwidth = imgwidth
rzheight = int(img.height * hscalefact)
else:
rzwidth = int(img.width * vscalefact)
rzheight = imgheight
img = img.resize((rzwidth, rzheight))
nd = ImageDraw.Draw(nimg)
nd.rectangle((1, 1, rzwidth + 2, rzheight + 2), outline='black')
nd.rectangle((0, 0, rzwidth + 3, rzheight + 3), outline='white')
nimg.paste(img, box=(2, 2))
outfile = io.BytesIO()
nimg.save(outfile, format='PNG')
data = base64.b64encode(outfile.getbuffer())
if imageformat == 'sixel':
sixel_draw(data)
elif imageformat == 'iterm':
iterm_draw(data, width, height)
else:
kitty_draw(data, width, height)
def sixel_draw(data):
bindata = base64.b64decode(data)
binfile = io.BytesIO()
binfile.write(bindata)
binfile.seek(0)
DumbWriter().draw(binfile)
def iterm_draw(data, width, height):
if not height:
height = 'auto'
if not width:
width = 'auto'
bindata = base64.b64decode(data)
datalen = len(bindata)
sys.stdout.write(
'\x1b]1337;File=inline=1;width={};height={};size={}:'.format(width,height,datalen))
sys.stdout.write(data.decode('utf8'))
sys.stdout.write('\a')
sys.stdout.flush()
def kitty_draw(data, width, height):
preamble = '\x1b_Ga=T,f=100'
if height:
preamble += f',r={height},c={width}'
#sys.stdout.write(repr(preamble))
#sys.stdout.write('\xb[{}D'.format(len(repr(preamble))))
#return
first = True
while data:
chunk, data = data[:4096], data[4096:]
m = 1 if data else 0
if first:
sys.stdout.write('{},m={};'.format(preamble, m))
else:
sys.stdout.write('\x1b_Gm={};'.format(m))
sys.stdout.write(chunk.decode('utf8'))
sys.stdout.write('\x1b\\')
sys.stdout.flush()
pass_through_args = []
killcon = False
try:
@@ -106,6 +363,201 @@ if options.Timestamp:
logreader.dump_to_console(logname)
sys.exit(0)
def prep_node_tile(node):
    # Walk the cursor from the screen origin to this node's tile position,
    # draw the tile's left edge and node label, then drop the cursor to the
    # line below the label ready for the image to be drawn.
    col, row = nodepositions[node]
    if col:
        cursor_right(col)
    if row:
        cursor_down(row)
    sys.stdout.write('▏' + node)
    cursor_left(len(node) + 1)
    cursor_down()
def reset_cursor(node):
    # Undo prep_node_tile: move the cursor from this node's tile back to
    # the screen origin (one extra row up accounts for the label line).
    col, row = nodepositions[node]
    if col:
        cursor_left(col)
    cursor_up(row + 1)
nodepositions = {}
numrows = 0
cwidth = 0
cheight = 0
imagedatabynode = {}
def redraw():
    # Repaint every cached screenshot (e.g. after a terminal resize).
    # Reads module globals: imagedatabynode (base64 image per node),
    # nodepositions (tile coordinates in tiled mode), options, cwidth,
    # cheight, and firstnodename.
    # NOTE(review): firstnodename appears to be assigned inside
    # do_screenshot(); at module scope this lookup presumably only succeeds
    # in the single-node interval path — TODO confirm.
    for node in imagedatabynode:
        imgdata = imagedatabynode[node]
        if node in nodepositions:
            # Tiled mode: position at the node's tile and remember where
            # we were so the cursor can be restored afterwards.
            prep_node_tile(node)
            cursor_save()
        else:
            if options.interval is not None:
                # Interval refresh only supports a single node.
                if node != firstnodename:
                    sys.stderr.write('Multiple nodes not supported for interval')
                    sys.exit(1)
                sticky_cursor()
            sys.stdout.write('{}: '.format(node))
        # one row is used by our own name, so cheight - 1 for that allowance
        draw_image(imgdata.encode(), cwidth, cheight - 1 if cheight else cheight)
        if node in nodepositions:
            cursor_restore()
            reset_cursor(node)
        else:
            sys.stdout.write('\n')
        sys.stdout.flush()
resized = False
def do_screenshot():
    # Fetch ikvm screenshots for the requested noderange and render them to
    # the terminal, either once or repeatedly (options.interval).  In tiled
    # mode the screen is partitioned into per-node cells and redrawn on
    # SIGWINCH.  Exits the process when done.
    global resized
    global numrows
    sess = client.Command()
    if options.tile:
        # Tiling renders many images at fixed cell positions, which only
        # the kitty and iterm protocols support here.
        imageformat = os.environ.get('CONFLUENT_IMAGE_PROTOCOL', 'kitty')
        if imageformat not in ('kitty', 'iterm'):
            sys.stderr.write('Tiled screenshots only supported with kitty or iterm protocol')
            sys.exit(1)
        allnodes = []
        numnodes = 0
        for res in sess.read('/noderange/{}/nodes/'.format(args[0])):
            allnodes.append(res['item']['href'].replace('/', ''))
            numnodes += 1
        resized = False
        def do_resize(a=None, b=None):
            # SIGWINCH handler / layout initializer.  When invoked as a
            # signal handler (a is the signal number, truthy) it clears the
            # screen and triggers a redraw at the end.
            global resized
            if a:
                resized = True
                # on a window resize, clear the old stuff
                # ideally we'd retain the images and redraw them
                sys.stdout.write('\x1bc')
            global numrows
            global cwidth
            global cheight
            cols, rows, cwidth, cheight, numrows = determine_tile_size(numnodes)
            # Assign each node a (column-cells, row-cells) tile origin,
            # filling left-to-right then wrapping to the next row.
            currcol = 1
            currcolcell = 0
            currrowcell = 0
            for node in allnodes:
                nodepositions[node] = currcolcell, currrowcell
                if currcol < cols:
                    currcol += 1
                    currcolcell += cwidth
                else:
                    currcol = 1
                    currcolcell = 0
                    currrowcell += cheight
            if a:
                redraw()
        do_resize()
        signal.signal(signal.SIGWINCH, do_resize)
    elif options.interval is not None:
        # Single-node refresh mode: start from a cleared screen.
        sys.stdout.write('\x1bc')
    firstnodename = None
    dorefresh = True
    vnconly = set([])  # nodes whose BMC only offers VNC, not ikvm screenshot
    while dorefresh:
        for res in sess.read('/noderange/{}/console/ikvm_screenshot'.format(args[0])):
            for node in res.get('databynode', {}):
                errorstr = ''
                if not firstnodename:
                    firstnodename = node
                error = res['databynode'][node].get('error')
                if error and 'vnc available' in error:
                    # Defer to the VNC capture pass below.
                    vnconly.add(node)
                    continue
                elif error:
                    errorstr = error
                imgdata = res['databynode'][node].get('image', {}).get('imgdata', None)
                if imgdata:
                    if len(imgdata) < 32:  # We were subjected to error
                        errorstr = f'Unable to get screenshot'
                if errorstr or imgdata:
                    draw_node(node, imgdata, errorstr, firstnodename, cwidth, cheight)
        if asyncvnc:
            # Capture VNC-only nodes by opening a unix-socket console
            # session per node and grabbing a framebuffer snapshot.
            urlbynode = {}
            for node in vnconly:
                for res in sess.update(f'/nodes/{node}/console/ikvm', {'method': 'unix'}):
                    url = res.get('item', {}).get('href')
                    if url:
                        urlbynode[node] = url
            draw_vnc_grabs(urlbynode, cwidth, cheight)
            if resized:
                # A resize arrived while capturing; re-layout and redraw.
                do_resize(True)
                resized = False
        elif vnconly:
            sys.stderr.write("Require asyncvnc installed to do VNC screenshotting\n")
        if options.interval is None:
            dorefresh = False
        else:
            dorefresh = True
            time.sleep(options.interval)
    sys.exit(0)
sys.exit(0)
try:
import asyncio, asyncvnc
except ImportError:
asyncvnc = None
def draw_vnc_grabs(urlbynode, cwidth, cheight):
    # Synchronous wrapper: run the concurrent VNC screenshot grabs to
    # completion on a fresh event loop.
    asyncio.run(grab_vncs(urlbynode, cwidth, cheight))
async def grab_vncs(urlbynode, cwidth, cheight):
    """Capture and draw VNC screenshots for every node, concurrently.

    urlbynode maps node name -> console endpoint; one screenshot task is
    launched per node and all are awaited together.
    """
    jobs = [
        asyncio.create_task(do_vnc_screenshot(nodename, target, cwidth, cheight))
        for nodename, target in urlbynode.items()
    ]
    await asyncio.gather(*jobs)
async def my_opener(host, port):
    # Connection opener for asyncvnc: "host" is really a unix domain socket
    # path (port is ignored), so open a unix connection instead of TCP.
    return await asyncio.open_unix_connection(host)
async def do_vnc_screenshot(node, url, cwidth, cheight):
    # Connect to one node's VNC endpoint over a unix socket, grab a single
    # framebuffer snapshot, encode it as base64 PNG, and render it.
    async with asyncvnc.connect(url, opener=my_opener) as client:
        # Retrieve pixels as a 3D numpy array
        pixels = await client.screenshot()
        # Save as PNG using PIL/pillow
        image = Image.fromarray(pixels)
        outfile = io.BytesIO()
        image.save(outfile, format='PNG')
        imgdata = base64.b64encode(outfile.getbuffer()).decode()
        if imgdata:
            # Empty firstnodename/errorstr: tiled path is the expected
            # caller here — NOTE(review): confirm interval mode is unused.
            draw_node(node, imgdata, '', '', cwidth, cheight)
def draw_node(node, imgdata, errorstr, firstnodename, cwidth, cheight):
    # Render one node's screenshot (or an error message) at its tile
    # position, caching the image data so redraw() can repaint it later.
    imagedatabynode[node] = imgdata
    if node in nodepositions:
        # Tiled mode: jump to the node's tile and save the cursor so it
        # can be restored after drawing.
        prep_node_tile(node)
        cursor_save()
    else:
        if options.interval is not None:
            # Interval refresh only supports a single node.
            if node != firstnodename:
                sys.stderr.write('Multiple nodes not supported for interval')
                sys.exit(1)
            sticky_cursor()
        sys.stdout.write('{}: '.format(node))
    # one row is used by our own name, so cheight - 1 for that allowance
    if errorstr:
        draw_text(errorstr, cwidth, cheight -1 if cheight else cheight)
    else:
        draw_image(imgdata.encode(), cwidth, cheight - 1 if cheight else cheight)
    if node in nodepositions:
        cursor_restore()
        reset_cursor(node)
    else:
        sys.stdout.write('\n')
    sys.stdout.flush()
if options.screenshot:
try:
cursor_hide()
do_screenshot()
except KeyboardInterrupt:
pass
finally:
cursor_show()
cursor_down(numrows)
sys.stdout.write('\n')
sys.exit(0)
def kill(noderange):
sess = client.Command()
envstring=os.environ.get('NODECONSOLE_WINDOWED_COMMAND')
@@ -113,29 +565,23 @@ def kill(noderange):
envstring = 'xterm'
nodes = []
for res in sess.read('/noderange/{0}/nodes/'.format(args[0])):
for res in sess.read('/noderange/{0}/nodes/'.format(noderange)):
node = res.get('item', {}).get('href', '/').replace('/', '')
if not node:
sys.stderr.write(res.get('error', repr(res)) + '\n')
sys.exit(1)
nodes.append(node)
for node in nodes:
s=socket.socket(socket.AF_UNIX)
winid=None
for node in nodes:
command = "ps auxww | grep {0} | grep console | egrep '\\b{1}\\b' | grep -v grep | awk '{{print $2}}'".format(envstring, node)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
try:
win=subprocess.Popen(['xwininfo', '-tree', '-root'], stdout=subprocess.PIPE)
wintr=win.communicate()[0]
for line in wintr.decode('utf-8').split('\n'):
if 'console: {0}'.format(node) in line or 'confetty' in line:
win_obj = [ele for ele in line.split(' ') if ele.strip()]
winid = win_obj[0]
except:
print("Error: cannot retrieve window id of node {}".format(node))
if winid:
ps_data=subprocess.Popen(['xkill', '-id', winid ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process_id = stdout.decode('utf-8').split()[0]
except IndexError:
sys.stderr.write(node + ": console window not found \n")
continue
subprocess.Popen(["kill", process_id], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sys.exit(0)
def handle_geometry(envlist, sizegeometry, side_pad=0, top_pad=0, first=False):
@@ -257,7 +703,6 @@ if options.windowed:
screenheight -= wmyo
currx = window_width
curry = 0
maxcol = int(screenwidth/window_width)
for node in sortutil.natural_sort(nodes):
if options.tile and envlist[0] == 'xterm':
@@ -265,7 +710,7 @@ if options.windowed:
corrected_y = curry
xgeometry = '{0}+{1}+{2}'.format(sizegeometry, corrected_x, corrected_y)
currx += window_width
if currx >= screenwidth:
if currx + window_width >= screenwidth:
currx=0
curry += window_height
if curry > screenheight:

View File

@@ -48,7 +48,18 @@ def armonce(nr, cli):
pass
def setpending(nr, profile, cli):
def setpending(nr, profile, profilebynodes, cli):
if profilebynodes:
for node in sortutil.natural_sort(profilebynodes):
prof = profilebynodes[node]
args = {'deployment.pendingprofile': prof, 'deployment.state': '', 'deployment.state_detail': ''}
if not prof.startswith('genesis-'):
args['deployment.stagedprofile'] = ''
args['deployment.profile'] = ''
for rsp in cli.update('/nodes/{0}/attributes/current'.format(node),
args):
pass
return
args = {'deployment.pendingprofile': profile, 'deployment.state': '', 'deployment.state_detail': ''}
if not profile.startswith('genesis-'):
args['deployment.stagedprofile'] = ''
@@ -69,6 +80,7 @@ def main(args):
ap.add_argument('-n', '--network', help='Initiate deployment over PXE/HTTP', action='store_true')
ap.add_argument('-p', '--prepareonly', help='Prepare only, skip any interaction with a BMC associated with this deployment action', action='store_true')
ap.add_argument('-m', '--maxnodes', help='Specifiy a maximum nodes to be deployed')
ap.add_argument('-r', '--redeploy', help='Redeploy nodes with the current or pending profile', action='store_true')
ap.add_argument('noderange', help='Set of nodes to deploy')
ap.add_argument('profile', nargs='?', help='Profile name to deploy')
args, extra = ap.parse_known_args(args)
@@ -78,7 +90,7 @@ def main(args):
if args.profile and not args.network:
sys.stderr.write('-n is a required argument currently to perform an install, optionally with -p\n')
return 1
if not args.profile and args.network:
if not args.profile and args.network and not args.redeploy:
sys.stderr.write('Both noderange and a profile name are required arguments to request a network deployment\n')
return 1
if args.clear and args.profile:
@@ -96,29 +108,68 @@ def main(args):
if 'error' in rsp:
sys.stderr.write(rsp['error'] + '\n')
sys.exit(1)
profilebynode = {}
if args.clear:
cleararm(args.noderange, c)
clearpending(args.noderange, c)
elif args.profile:
profnames = []
for prof in c.read('/deployment/profiles/'):
profname = prof.get('item', {}).get('href', None)
if profname:
profname = profname.replace('/', '')
profnames.append(profname)
if profname == args.profile:
break
else:
sys.stderr.write('The specified profile "{}" is not an available profile\n'.format(args.profile))
if profnames:
sys.stderr.write('The following profiles are available:\n')
for profname in profnames:
sys.stderr.write(' ' + profname + '\n')
else:
sys.stderr.write('No deployment profiles available, try osdeploy import or imgutil capture\n')
elif args.redeploy:
hadpending = {}
for rsp in c.read('/noderange/{0}/attributes/current'.format(args.noderange)):
for node in rsp.get('databynode', {}):
nodeinfo = rsp['databynode'][node]
for attr in nodeinfo:
if attr == 'deployment.pendingprofile':
curr = nodeinfo[attr].get('value', '')
if curr:
hadpending[node] = True
profilebynode[node] = curr
if attr == 'deployment.stagedprofile':
curr = nodeinfo[attr].get('value', '')
if curr and node not in hadpending:
profilebynode[node] = curr
if attr == 'deployment.profile':
curr = nodeinfo[attr].get('value', '')
if curr and node not in profilebynode:
profilebynode[node] = curr
for lockinfo in c.read('/noderange/{0}/deployment/lock'.format(args.noderange)):
for node in lockinfo.get('databynode', {}):
lockstate = lockinfo['databynode'][node]['lock']['value']
if lockstate == 'locked':
lockednodes.append(node)
if args.profile and profilebynode:
sys.stderr.write('The -r/--redeploy option cannot be used with a profile, it redeploys the current or pending profile\n')
return 1
if args.profile or profilebynode:
lockednodes = []
for lockinfo in c.read('/noderange/{0}/deployment/lock'.format(args.noderange)):
for node in lockinfo.get('databynode', {}):
lockstate = lockinfo['databynode'][node]['lock']['value']
if lockstate == 'locked':
lockednodes.append(node)
if lockednodes:
sys.stderr.write('Requested noderange has nodes with locked deployment: ' + ','.join(lockednodes))
sys.stderr.write('\n')
sys.exit(1)
if args.profile:
profnames = []
for prof in c.read('/deployment/profiles/'):
profname = prof.get('item', {}).get('href', None)
if profname:
profname = profname.replace('/', '')
profnames.append(profname)
if profname == args.profile:
break
else:
sys.stderr.write('The specified profile "{}" is not an available profile\n'.format(args.profile))
if profnames:
sys.stderr.write('The following profiles are available:\n')
for profname in profnames:
sys.stderr.write(' ' + profname + '\n')
else:
sys.stderr.write('No deployment profiles available, try osdeploy import or imgutil capture\n')
sys.exit(1)
armonce(args.noderange, c)
setpending(args.noderange, args.profile, c)
setpending(args.noderange, args.profile, profilebynode, c)
else:
databynode = {}
for r in c.read('/noderange/{0}/attributes/current'.format(args.noderange)):

View File

@@ -123,7 +123,7 @@ def process_header(header):
fields.append('serial')
elif datum == 'uuid':
fields.append('uuid')
elif datum in ('bmc', 'imm', 'xcc'):
elif datum in ('bmc', 'imm', 'xcc', 'ip'):
fields.append('hardwaremanagement.manager')
elif datum in ('bmc gateway', 'xcc gateway', 'imm gateway'):
fields.append('net.bmc.ipv4_gateway')
@@ -191,6 +191,7 @@ async def import_csv(options, session):
if field in unique_fields:
unique_data[field] = set([])
broken = False
alldata=[]
for record in records:
currfields = list(fields)
nodedatum = {}
@@ -207,9 +208,15 @@ async def import_csv(options, session):
nodedatum[currfield] = datum
if not datum_complete(nodedatum):
sys.exit(1)
alldata.append(nodedatum)
allthere = True
for nodedatum in alldata:
if not search_record(nodedatum, options, session) and not broken:
allthere = False
await blocking_scan(session)
if not search_record(nodedatum, options, session):
break
for nodedatum in alldata:
if not allthere and not search_record(nodedatum, options, session):
sys.stderr.write(
"Could not match the following data: " +
repr(nodedatum) + '\n')
@@ -230,8 +237,12 @@ async def import_csv(options, session):
print('Defined ' + res['created'])
else:
print(repr(res))
child = os.fork()
if child:
continue
for mac in maclist:
for res in session.update('/discovery/by-mac/{0}'.format(mac),
mysess = client.Command()
for res in mysess.update('/discovery/by-mac/{0}'.format(mac),
{'node': nodename}):
if 'error' in res:
sys.stderr.write(res['error'] + '\n')
@@ -241,6 +252,12 @@ async def import_csv(options, session):
print('Discovered ' + res['assigned'])
else:
print(repr(res))
sys.exit(0)
while True:
try:
os.wait()
except ChildProcessError:
break
if exitcode:
sys.exit(exitcode)

View File

@@ -78,8 +78,11 @@ exitcode = 0
def format_event(evt):
retparts = []
if 'timestamp' in evt and evt['timestamp'] is not None:
display = dt.strptime(evt['timestamp'], '%Y-%m-%dT%H:%M:%S')
retparts.append(display.strftime('%m/%d/%Y %H:%M:%S'))
try:
display = dt.strptime(evt['timestamp'], '%Y-%m-%dT%H:%M:%S')
retparts.append(display.strftime('%m/%d/%Y %H:%M:%S'))
except ValueError:
display = ''
dscparts = []
if evt.get('log_id', None):
retparts.append(evt['log_id'] + ':')

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016-2017 Lenovo
@@ -56,7 +56,7 @@ components = ['all']
argparser = optparse.OptionParser(
usage="Usage: "
"%prog <noderange> [list][update [--backup <file>]]|[<components>]")
"%prog <noderange> [list][updatestatus][update [--backup <file>]]|[<components>]")
argparser.add_option('-b', '--backup', action='store_true',
help='Target a backup bank rather than primary')
argparser.add_option('-m', '--maxnodes', type='int',
@@ -65,14 +65,18 @@ argparser.add_option('-m', '--maxnodes', type='int',
(options, args) = argparser.parse_args()
upfile = None
querystatus = False
try:
noderange = args[0]
if len(args) > 1:
if args[1] == 'update':
upfile = args[2]
else:
comps = []
if args[1] == 'list':
comps = args[2:]
elif args[1] == 'updatestatus':
querystatus = True
else:
comps = args[1:]
components = []
@@ -110,11 +114,24 @@ def update_firmware(session, filename):
upargs['bank'] = 'backup'
noderrs = {}
if session.unixdomain:
of = open(filename, 'rb')
try:
session.add_file(filename, of.fileno(), 'rb')
except Exception:
pass
filesbynode = {}
for exp in session.create('/noderange/{0}/attributes/expression'.format(noderange),
{'expression': filename}):
if 'error' in exp:
sys.stderr.write(exp['error'] + '\n')
exitcode |= exp.get('errorcode', 1)
ex = exp.get('databynode', ())
for node in ex:
filesbynode[node] = ex[node]['value']
if not isinstance(filesbynode[node], bytes) and not isinstance(filesbynode[node], str):
filesbynode[node] = filesbynode[node].encode('utf-8')
for node in filesbynode:
endfilename = filesbynode[node]
of = open(endfilename, 'rb')
try:
session.add_file(endfilename, of.fileno(), 'rb')
except Exception:
pass
for res in session.create(resource, upargs):
if 'created' not in res:
for nodename in res.get('databynode', ()):
@@ -149,9 +166,13 @@ def show_firmware(session):
firmware_shown = False
nodes_matched = False
for component in components:
category = 'all'
if component in ('adapters', 'disks', 'misc', 'core'):
category = component
component = 'all'
for res in session.read(
'/noderange/{0}/inventory/firmware/all/{1}'.format(
noderange, component)):
'/noderange/{0}/inventory/firmware/{2}/{1}'.format(
noderange, component, category)):
nodes_matched = True
exitcode |= client.printerror(res)
if 'databynode' not in res:
@@ -171,7 +192,16 @@ def show_firmware(session):
try:
session = client.Command()
if upfile is None:
if querystatus:
for res in session.read(
'/noderange/{0}/inventory/firmware/updatestatus'.format(noderange)):
for node in res.get('databynode', {}):
currstat = res['databynode'][node].get('status', None)
if currstat:
print('{}: {}'.format(node, currstat))
else:
print(repr(res))
elif upfile is None:
show_firmware(session)
else:
update_firmware(session, upfile)

View File

@@ -49,7 +49,9 @@ def pretty(text):
def print_mem_info(node, prefix, meminfo):
memdescfmt = '{0}GB PC'
if meminfo['memory_type'] == 'DDR3 SDRAM':
if meminfo['memory_type'] is None:
memdescfmt = '{0}GB '
elif meminfo['memory_type'] == 'DDR3 SDRAM':
memdescfmt += '3-{1} '
elif 'DDR4' in meminfo['memory_type']:
memdescfmt += '4-{1} '
@@ -58,16 +60,21 @@ def print_mem_info(node, prefix, meminfo):
elif 'DCPMM' in meminfo['memory_type']:
memdescfmt = '{0}GB {1} '
meminfo['module_type'] = 'DCPMM'
elif meminfo['memory_type'] == 'HBM':
memdescfmt = '{0}GB HBM '
else:
print('{0}: {1}: Unrecognized Memory'.format(node, prefix))
return
if meminfo.get('ecc', False):
memdescfmt += 'ECC '
capacity = meminfo['capacity_mb'] / 1024
modtype = meminfo.get('module_type', None)
if modtype:
memdescfmt += modtype
memdesc = memdescfmt.format(capacity, meminfo['speed'])
if meminfo.get('capacity_mb', None):
capacity = meminfo['capacity_mb'] // 1024
memdesc = memdescfmt.format(capacity, meminfo['speed'])
else:
memdesc = 'Unspecified Module'
print('{0}: {1} description: {2}'.format(node, prefix, memdesc))
print('{0}: {1} manufacturer: {2}'.format(
node, prefix, meminfo['manufacturer']))

View File

@@ -61,7 +61,10 @@ def get_neighbors(switch):
switch_neigbors = []
url = '/networking/neighbors/by-switch/{0}/by-peername/'.format(switch)
for neighbor in session.read(url):
switch = neighbor['item']['href'].strip('/')
try:
switch = neighbor['item']['href'].strip('/')
except:
continue
if switch in all_switches:
switch_neigbors.append(switch)
return switch_neigbors

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2019 Lenovo
@@ -65,16 +65,30 @@ client.check_globbing(noderange)
def install_license(session, filename):
global exitcode
resource = '/noderange/{0}/configuration/' \
'management_controller/licenses/'.format(noderange)
filename = os.path.abspath(filename)
instargs = {'filename': filename}
if session.unixdomain:
of = open(filename, 'rb')
try:
session.add_file(filename, of.fileno(), 'rb')
except Exception:
pass
filesbynode = {}
for exp in session.create('/noderange/{0}/attributes/expression'.format(noderange),
{'expression': filename}):
if 'error' in exp:
sys.stderr.write(exp['error'] + '\n')
exitcode |= exp.get('errorcode', 1)
ex = exp.get('databynode', ())
for node in ex:
filesbynode[node] = ex[node]['value']
if not isinstance(filesbynode[node], bytes) and not isinstance(filesbynode[node], str):
filesbynode[node] = filesbynode[node].encode('utf-8')
for node in filesbynode:
endfilename = filesbynode[node]
of = open(endfilename, 'rb')
try:
session.add_file(endfilename, of.fileno(), 'rb')
except Exception:
pass
for res in session.create(resource, instargs):
for node in res.get('databynode', []):
if 'error' in res['databynode'][node]:

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2018 Lenovo
@@ -101,22 +101,37 @@ def detach_media(noderange, media):
def upload_media(noderange, media):
global exitcode
if not os.path.exists(media):
sys.stderr.write('Unable to locate requested file {0}\n'.format(
media))
sys.exit(404)
session = client.Command()
output = sq.ScreenPrinter(noderange, session)
filename = os.path.abspath(media)
resource = '/noderange/{0}/media/uploads/'.format(noderange)
filename = os.path.abspath(filename)
upargs = {'filename': filename}
noderrs = {}
if session.unixdomain:
of = open(filename, 'rb')
try:
session.add_file(filename, of.fileno(), 'rb')
except Exception:
pass
filesbynode = {}
for exp in session.create('/noderange/{0}/attributes/expression'.format(noderange),
{'expression': filename}):
if 'error' in exp:
sys.stderr.write(exp['error'] + '\n')
exitcode |= exp.get('errorcode', 1)
ex = exp.get('databynode', ())
for node in ex:
filesbynode[node] = ex[node]['value']
if not isinstance(filesbynode[node], bytes) and not isinstance(filesbynode[node], str):
filesbynode[node] = filesbynode[node].encode('utf-8')
for node in filesbynode:
endfilename = filesbynode[node]
if not os.path.exists(endfilename):
sys.stderr.write('Unable to locate requested file {0}\n'.format(
endfilename))
sys.exit(404)
of = open(endfilename, 'rb')
try:
session.add_file(endfilename, of.fileno(), 'rb')
except Exception:
pass
nodeurls = {}
for res in session.create(resource, upargs):
if 'created' not in res:

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016-2017 Lenovo
@@ -42,6 +42,8 @@ def run():
usage="Usage: %prog [options] noderange")
argparser.add_option('-f', '-c', '--count', type='int', default=168,
help='Number of commands to run at a time')
argparser.add_option('-o', '--origname', action='store_true',
help='Use original nodename in print out even if substituted')
argparser.add_option('-s', '--substitutename',
help='Use a different name other than the nodename for ping, with {}, it is the entire name evaluated as an expression, otherwise it is used as a suffix')
# among other things, FD_SETSIZE limits. Besides, spawning too many
@@ -56,6 +58,7 @@ def run():
currprocs = 0
all = set([])
poller = select.epoll()
pipedesc = {}
pendingexecs = deque()
exitcode = 0
@@ -83,19 +86,29 @@ def run():
cmdv = ['ping', '-c', '1', '-W', '1', pingnode]
if currprocs < concurrentprocs:
currprocs += 1
run_cmdv(pingnode, cmdv, all, pipedesc)
if options.origname:
run_cmdv(node, cmdv, all, poller, pipedesc)
else:
run_cmdv(pingnode, cmdv, all, poller, pipedesc)
else:
pendingexecs.append((pingnode, cmdv))
if options.origname:
pendingexecs.append((node, cmdv))
else:
pendingexecs.append((pingnode, cmdv))
if not all or exitcode:
sys.exit(exitcode)
rdy, _, _ = select.select(all, [], [], 10)
rdy = poller.poll(10)
while all:
pernodeout = {}
for r in rdy:
r = r[0]
desc = pipedesc[r]
r = desc['file']
node = desc['node']
data = True
while data and select.select([r], [], [], 0)[0]:
singlepoller = select.epoll()
singlepoller.register(r, select.EPOLLIN)
while data and singlepoller.poll(0):
data = r.readline()
if not data:
pop = desc['popen']
@@ -103,6 +116,7 @@ def run():
if ret is not None:
exitcode = exitcode | ret
all.discard(r)
poller.unregister(r)
r.close()
if desc['type'] == 'stdout':
if ret:
@@ -111,7 +125,8 @@ def run():
print('{0}: ping'.format(node))
if pendingexecs:
node, cmdv = pendingexecs.popleft()
run_cmdv(node, cmdv, all, pipedesc)
run_cmdv(node, cmdv, all, poller, pipedesc)
singlepoller.close()
for node in sortutil.natural_sort(pernodeout):
for line in pernodeout[node]:
line = client.stringify(line)
@@ -121,19 +136,21 @@ def run():
sys.stdout.write('{0}: {1}'.format(node, line))
sys.stdout.flush()
if all:
rdy, _, _ = select.select(all, [], [], 10)
rdy = poller.poll(10)
sys.exit(exitcode)
def run_cmdv(node, cmdv, all, pipedesc):
def run_cmdv(node, cmdv, all, poller, pipedesc):
nopen = subprocess.Popen(
cmdv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pipedesc[nopen.stdout] = {'node': node, 'popen': nopen,
'type': 'stdout'}
pipedesc[nopen.stderr] = {'node': node, 'popen': nopen,
'type': 'stderr'}
pipedesc[nopen.stdout.fileno()] = {'node': node, 'popen': nopen,
'type': 'stdout', 'file': nopen.stdout}
pipedesc[nopen.stderr.fileno()] = {'node': node, 'popen': nopen,
'type': 'stderr', 'file': nopen.stderr}
all.add(nopen.stdout)
poller.register(nopen.stdout, select.EPOLLIN)
all.add(nopen.stderr)
poller.register(nopen.stderr, select.EPOLLIN)
if __name__ == '__main__':

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016-2017 Lenovo
@@ -67,6 +67,7 @@ def run():
currprocs = 0
all = set([])
poller = select.epoll()
pipedesc = {}
pendingexecs = deque()
exitcode = 0
@@ -84,19 +85,23 @@ def run():
cmdv = shlex.split(cmd)
if currprocs < concurrentprocs:
currprocs += 1
run_cmdv(node, cmdv, all, pipedesc)
run_cmdv(node, cmdv, all, poller, pipedesc)
else:
pendingexecs.append((node, cmdv))
if not all or exitcode:
sys.exit(exitcode)
rdy, _, _ = select.select(all, [], [], 10)
rdy = poller.poll(10)
while all:
pernodeout = {}
for r in rdy:
r = r[0]
desc = pipedesc[r]
r = desc['file']
node = desc['node']
data = True
while data and select.select([r], [], [], 0)[0]:
singlepoller = select.epoll()
singlepoller.register(r, select.EPOLLIN)
while data and singlepoller.poll(0):
data = r.readline()
if data:
if desc['type'] == 'stdout':
@@ -116,10 +121,12 @@ def run():
if ret is not None:
exitcode = exitcode | ret
all.discard(r)
poller.unregister(r)
r.close()
if desc['type'] == 'stdout' and pendingexecs:
node, cmdv = pendingexecs.popleft()
run_cmdv(node, cmdv, all, pipedesc)
run_cmdv(node, cmdv, all, poller, pipedesc)
singlepoller.close()
for node in sortutil.natural_sort(pernodeout):
for line in pernodeout[node]:
line = client.stringify(line)
@@ -129,11 +136,11 @@ def run():
sys.stdout.write('{0}: {1}'.format(node, line))
sys.stdout.flush()
if all:
rdy, _, _ = select.select(all, [], [], 10)
rdy = poller.poll(10)
sys.exit(exitcode)
def run_cmdv(node, cmdv, all, pipedesc):
def run_cmdv(node, cmdv, all, poller, pipedesc):
try:
nopen = subprocess.Popen(
cmdv, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -142,13 +149,14 @@ def run_cmdv(node, cmdv, all, pipedesc):
sys.stderr.write('{0}: Unable to find local executable file "{1}"'.format(node, cmdv[0]))
return
raise
pipedesc[nopen.stdout] = {'node': node, 'popen': nopen,
'type': 'stdout'}
pipedesc[nopen.stderr] = {'node': node, 'popen': nopen,
'type': 'stderr'}
pipedesc[nopen.stdout.fileno()] = {'node': node, 'popen': nopen,
'type': 'stdout', 'file': nopen.stdout}
pipedesc[nopen.stderr.fileno()] = {'node': node, 'popen': nopen,
'type': 'stderr', 'file': nopen.stderr}
all.add(nopen.stdout)
poller.register(nopen.stdout, select.EPOLLIN)
all.add(nopen.stderr)
poller.register(nopen.stderr, select.EPOLLIN)
if __name__ == '__main__':
run()

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/python3
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2016-2017 Lenovo
@@ -109,6 +109,7 @@ def run():
ex = exp.get('databynode', ())
for node in ex:
cmdparms.append((node, ex[node]['value']))
poller = select.epoll()
for node, cmd in cmdparms:
sshnode = nodemap.get(node, node)
if not isinstance(cmd, str) and not isinstance(cmd, bytes):
@@ -121,19 +122,23 @@ def run():
cmdv += [sshnode, cmd]
if currprocs < concurrentprocs:
currprocs += 1
run_cmdv(node, cmdv, all, pipedesc)
run_cmdv(node, cmdv, all, poller, pipedesc)
else:
pendingexecs.append((node, cmdv))
if not all or exitcode:
sys.exit(exitcode)
rdy, _, _ = select.select(all, [], [], 10)
rdy = poller.poll(10)
while all:
pernodeout = {}
for r in rdy:
r = r[0]
desc = pipedesc[r]
r = desc['file']
node = desc['node']
data = True
while data and select.select([r], [], [], 0)[0]:
singlepoller = select.epoll()
singlepoller.register(r, select.EPOLLIN)
while data and singlepoller.poll(0):
data = r.readline()
if data:
if desc['type'] == 'stdout':
@@ -153,10 +158,12 @@ def run():
if ret is not None:
exitcode = exitcode | ret
all.discard(r)
poller.unregister(r)
r.close()
if desc['type'] == 'stdout' and pendingexecs:
node, cmdv = pendingexecs.popleft()
run_cmdv(node, cmdv, all, pipedesc)
run_cmdv(node, cmdv, all, poller, pipedesc)
singlepoller.close()
for node in sortutil.natural_sort(pernodeout):
for line in pernodeout[node]:
line = client.stringify(line)
@@ -167,19 +174,21 @@ def run():
sys.stdout.write('{0}: {1}'.format(node, line))
sys.stdout.flush()
if all:
rdy, _, _ = select.select(all, [], [], 10)
rdy = poller.poll(10)
sys.exit(exitcode)
def run_cmdv(node, cmdv, all, pipedesc):
def run_cmdv(node, cmdv, all, poller, pipedesc):
nopen = subprocess.Popen(
cmdv, stdin=devnull, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pipedesc[nopen.stdout] = {'node': node, 'popen': nopen,
'type': 'stdout'}
pipedesc[nopen.stderr] = {'node': node, 'popen': nopen,
'type': 'stderr'}
pipedesc[nopen.stdout.fileno()] = {'node': node, 'popen': nopen,
'type': 'stdout', 'file': nopen.stdout}
pipedesc[nopen.stderr.fileno()] = {'node': node, 'popen': nopen,
'type': 'stderr', 'file': nopen.stderr}
all.add(nopen.stdout)
all.add(nopen.stderr)
poller.register(nopen.stdout, select.EPOLLIN)
poller.register(nopen.stderr, select.EPOLLIN)
if __name__ == '__main__':

View File

@@ -19,6 +19,7 @@ import argparse
import base64
import csv
import io
import os
import numpy as np
import sys
@@ -72,6 +73,8 @@ def plot(gui, output, plotdata, bins, fmt):
tdata = io.BytesIO()
plt.savefig(tdata)
if not gui and not output:
if fmt == 'environment':
fmt = os.environ.get('CONFLUENT_IMAGE_PROTOCOL', 'kitty')
if fmt == 'sixel':
writer = DumbWriter()
writer.draw(tdata)
@@ -108,7 +111,7 @@ aparser = argparse.ArgumentParser(description='Quick access to common statistics
aparser.add_argument('-c', type=int, default=0, help='Column number to analyze (default is last column)')
aparser.add_argument('-d', default=None, help='Value used to separate columns')
aparser.add_argument('-x', default=False, action='store_true', help='Output histogram in graphical format')
aparser.add_argument('-f', default='sixel', help='Format for histogram output (sixel/iterm/kitty)')
aparser.add_argument('-f', default='environment', help='Format for histogram output (sixel/iterm/kitty)')
aparser.add_argument('-s', default=0, help='Number of header lines to skip before processing')
aparser.add_argument('-g', default=False, action='store_true', help='Open histogram in separate graphical window')
aparser.add_argument('-o', default=None, help='Output histogram to the specified filename in PNG format')

View File

@@ -57,7 +57,7 @@ def stringify(instr):
# Normalize unicode and bytes to 'str', correcting for
# current python version
if isinstance(instr, bytes) and not isinstance(instr, str):
return instr.decode('utf-8')
return instr.decode('utf-8', errors='replace')
elif not isinstance(instr, bytes) and not isinstance(instr, str):
return instr.encode('utf-8')
return instr
@@ -464,8 +464,8 @@ def printattributes(session, requestargs, showtype, nodetype, noderange, options
def _sort_attrib(k):
if isinstance(k[1], dict) and k[1].get('sortid', None) is not None:
return k[1]['sortid']
return k[0]
return sortutil.naturalize_string('{}'.format(k[1]['sortid']))
return sortutil.naturalize_string(k[0])
def print_attrib_path(path, session, requestargs, options, rename=None, attrprefix=None):
exitcode = 0

View File

@@ -18,8 +18,9 @@ import struct
import termios
def get_screengeom():
return struct.unpack('hh', fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ,
b'....'))
# returns height in cells, width in cells, width in pixels, height in pixels
return struct.unpack('hhhh', fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ,
b'........'))
class ScreenPrinter(object):
def __init__(self, noderange, client, textlen=4):
@@ -58,7 +59,7 @@ class ScreenPrinter(object):
def drawscreen(self, node=None):
if self.squeeze:
currheight, currwidth = get_screengeom()
currheight, currwidth, _, _ = get_screengeom()
currheight -= 2
if currheight < 1:
currheight = 1
@@ -120,6 +121,7 @@ if __name__ == '__main__':
c = client.Command()
p = ScreenPrinter('d1-d12', c)
p.set_output('d3', 'Upload: 67%')
p.set_output('d7', 'Upload: 67%')

View File

@@ -1,7 +1,7 @@
%define name confluent_client
%define version #VERSION#
%define fversion %{lua:
sv, _ = string.gsub("#VERSION#", "[~+]", "-")
sv, _ = string.gsub("#VERSION#", "[~]", "-")
print(sv)
}
%define release 1

View File

@@ -13,7 +13,7 @@ noderange. There are two general approaches.
It can be used ad-hoc, using -i and -n to specify the address and name portions respectively. This accepts the standard confluent expression syntax, allowing for things like 172.30.1.{n1} or {node}.{dns.domain} or {bmc}.
It can also read from the confluent db, using `-a`. In this mode, each net.<value>.<attribute> group is pulled together into hosts lines. ipv4_address and ipv6_address fields are associated with the corresponding hostname attributes.
It can also read from the confluent db, using `-a`. In this mode, each net.<value>.<attribute> group is pulled together into hosts lines. ipv4_address and ipv6_address fields are associated with the corresponding hostname attributes. You can use `-f` to put the FQDN first.
## EXAMPLES

View File

@@ -38,5 +38,8 @@ the json files (password protected, removed from the files, or unprotected).
keys do not change and as such they do not require
incremental backup.
* `-y`, `--yaml`:
Use YAML instead of JSON as file format
* `-h`, `--help`:
Show help message and exit

View File

@@ -0,0 +1,139 @@
# imgutil(1) -- Work with confluent OS cloning and diskless images
## SYNOPSIS
`imgutil` `build` [<options>] <scratchdir>
`imgutil` `exec` [<options>] <scratchdir> [<cmd>...]
`imgutil` `unpack` <profilename> <scratchdir>
`imgutil` `pack` [<options>] <scratchdir> <profilename>
`imgutil` `capture` <node> <profilename>
## DESCRIPTION
**imgutil** is a utility for creating, managing, and deploying OS images for diskless boot and system cloning in a Confluent environment. It supports building images from scratch, capturing images from running systems, and packing/unpacking diskless profiles.
## COMMANDS
* `build`:
Build a new diskless image from scratch in the specified scratch directory.
* `exec`:
Start the specified scratch directory as a container and optionally run a command inside it.
* `unpack`:
Unpack a diskless image profile to a scratch directory for modification.
* `pack`:
Pack a scratch directory into a diskless profile that can be deployed.
* `capture`:
Capture an image for cloning from a running system.
## BUILD OPTIONS
* `-r`, `--addrepos` <repository>:
Repositories to add in addition to the main source. May be specified multiple times.
* `-p`, `--packagelist` <file>:
Filename of package list to replace default pkglist.
* `-a`, `--addpackagelist` <file>:
A list of additional packages to include. May be specified multiple times.
* `-s`, `--source` <directory>:
Directory to pull installation from, typically a subdirectory of `/var/lib/confluent/distributions`. By default, the repositories for the build system are used. For Ubuntu, this is not supported; the build system repositories are always used.
* `-y`, `--non-interactive`:
Avoid prompting for confirmation.
* `-v`, `--volume` <mount>:
Directory to make available in the build environment. `-v /` will cause it to be mounted in image as `/run/external/`. `-v /:/run/root` will override the target to be `/run/root`. Something like `/var/lib/repository:-` will cause it to mount to the identical path inside the image. May be specified multiple times.
* <scratchdir>:
Directory to build new diskless root in.
## EXEC OPTIONS
* `-v`, `--volume` <mount>:
Directory to make available in the build environment. `-v /` will cause it to be mounted in image as `/run/external/`. `-v /:/run/root` will override the target to be `/run/root`. May be specified multiple times.
* <scratchdir>:
Directory of an unpacked diskless root.
* <cmd>:
Optional command to run (defaults to a shell).
## UNPACK OPTIONS
* <profilename>:
The diskless OS profile to unpack.
* <scratchdir>:
Directory to extract diskless root to.
## PACK OPTIONS
* `-b`, `--baseprofile` <profile>:
Profile to copy extra info from. For example, to make a new version of an existing profile, reference the previous one as baseprofile.
* `-u`, `--unencrypted`:
Pack an unencrypted image rather than encrypting.
* <scratchdir>:
Directory containing diskless root.
* <profilename>:
The desired diskless OS profile name to pack the root into.
## CAPTURE OPTIONS
* <node>:
Node to capture image from.
* <profilename>:
Profile name for captured image.
## EXAMPLES
Build a diskless image from a distribution:
imgutil build -s alma-9.6-x86_64 /tmp/myimage
Execute a shell in an unpacked image:
imgutil exec /tmp/myimage
Execute a specific command in an image:
imgutil exec /tmp/myimage /bin/rpm -qa
Unpack an existing profile for modification:
imgutil unpack myprofile /tmp/myimage
Pack a modified image into a new profile:
imgutil pack /tmp/myimage myprofile-v2
Capture an image from a running node:
imgutil capture node01 production-image
## FILES
* `/var/lib/confluent/public/os/`:
Default location for OS profiles.
* `/var/lib/confluent/private/os/`:
Location for encrypted image keys and private data.
* `/var/lib/confluent/distributions/`:
Default location for installation sources.
## SEE ALSO
osdeploy(8)
## AUTHOR
Written for the Confluent project.

View File

@@ -1,38 +0,0 @@
l2traceroute(8) -- returns the layer 2 route through an Ethernet network managed by confluent given 2 end points.
==============================
## SYNOPSIS
`l2traceroute [options] <start_node> <end_noderange>`
## DESCRIPTION
**l2traceroute** is a command that returns the layer 2 route for the configered interfaces in nodeattrib.
It can also be used with the -i and -e options to check against specific interfaces on the endpoints.
## PREREQUISITES
**l2traceroute** the net.<interface>.switch attributes have to be set on the end points if endpoint is not a switch
## OPTIONS
* ` -e` EFACE, --eface=INTERFACE
interface to check against for the second end point
* ` -i` INTERFACE, --interface=INTERFACE
interface to check against for the first end point
* ` -c` CUMULUS, --cumulus=CUMULUS
return layer 2 route through cumulus switches only
* `-h`, `--help`:
Show help message and exit
## EXAMPLES
* Checking route between two nodes:
`# l2traceroute_client n244 n1851`
`n244 to n1851: ['switch114']`
* Checking route from one node to multiple nodes:
`# l2traceroute_client n244 n1833,n1851`
`n244 to n1833: ['switch114', 'switch7', 'switch32', 'switch253', 'switch85', 'switch72', 'switch21', 'switch2', 'switch96', 'switch103', 'switch115']
n244 to n1851: ['switch114']`

View File

@@ -24,6 +24,8 @@ For a full list of attributes, run `nodeattrib <node> all` against a node.
If `-c` is specified, this will set the nodeattribute to a null value.
This is different from setting the value to an empty string.
Arbitrary custom attributes can also be created with the `custom.` prefix.
Attributes may be specified by wildcard, for example `net.*switch` will report
all attributes that begin with `net.` and end with `switch`.

View File

@@ -21,9 +21,15 @@ console process which will result in the console window closing.
## OPTIONS
* `-i N`, `--interval`:
For screenshot mode, fetch new screenshots and overwrite old screenshots every N seconds.
For example, `nodeconsole r3u[21:24] -tsi 3` will tile screenshots of r3u21 through r3u24 and
refresh them every 3 seconds.
* `-t`, `--tile`:
Use tmux to arrange consoles of the given noderange into a tiled layout on
the terminal screen
For text consoles, use tmux to arrange consoles of the given noderange into a tiled layout on
the terminal screen. If using 'screenshot' mode, divide the terminal and display the images
in a grid.
* `-l`, `--log`:
Perform a log reply on the current, local log in /var/log/confluent/consoles.
@@ -34,6 +40,14 @@ console process which will result in the console window closing.
Dump the log with timestamps on the current, local log in /var/log/confluent/consoles.
If in collective mode, this only makes sense to use on the current collective
manager at this time.
* `-s`, `--screenshot`:
Attempt to grab screenshot(s) and render using a terminal
image protocol. The image protocol defaults to kitty, and
can be selected by CONFLUENT_IMAGE_PROTOCOL environment variable.
Supported protocols are kitty, iterm, and, sixel (sixel only
if PySixel is installed). This only presents screenshots, there
is no input supported to graphical consoles from a terminal.
* `-w`, `--windowed`:
Open terminal windows for each node. The

View File

@@ -3,7 +3,7 @@ nodefirmware(8) -- Report firmware information on confluent nodes
## SYNOPSIS
`nodefirmware <noderange> [list][update [--backup <file>]]|[<components>]`
`nodefirmware <noderange> [list][updatestatus][update [--backup <file>]]|[<components>]`
## DESCRIPTION
@@ -17,6 +17,9 @@ not be relevant to redfish. Additionally, the Lenovo XCC makes certain
information available over IPMI that is not otherwise available (for example
the FPGA version where applicable).
The updatestatus argument will describe the state of firmware updates on the
nodes.
In the update form, it accepts a single file and attempts to update it using
the out of band facilities. Firmware updates can end in one of three states:

View File

@@ -11,7 +11,7 @@ nodegroupattrib(8) -- List or change confluent nodegroup attributes
## DESCRIPTION
`nodegroupattrip` queries the confluent server to get information about nodes.
`nodegroupattrib` queries the confluent server to get information about nodes.
In the simplest form, it simply takes the given group and lists the attributes of that group.
Contrasted with nodeattrib(8), settings managed by nodegroupattrib will be added

View File

@@ -0,0 +1,42 @@
nodel2traceroute(8) -- returns the layer 2 route through an Ethernet network managed by confluent given 2 end points.
==============================
## SYNOPSIS
`nodel2traceroute [options] <start_node> <end_noderange>`
## DESCRIPTION
**nodel2traceroute** is a command that returns the layer 2 route for the configured interfaces in nodeattrib.
It can also be used with the -i and -e options to check against specific interfaces on the endpoints. If the
--interface or --eface option are not used then the command will check for routes against all the defined
interfaces in nodeattrib (net.*.switch) for the nodes.
## PREREQUISITES
**nodel2traceroute** requires the net.<interface>.switch attributes to be set on the end points if an endpoint is not a switch
## OPTIONS
* ` -e` EFACE, --eface=INTERFACE
interface to check against for the second end point, or end points if checking against multiple nodes
* ` -i` INTERFACE, --interface=INTERFACE
interface to check against for the first end point
* ` -c` CUMULUS, --cumulus=CUMULUS
return layer 2 route through cumulus switches only
* `-h`, `--help`:
Show help message and exit
## EXAMPLES
* Checking route between two nodes:
`# nodel2traceroute n244 n1851`
`n244 to n1851: ['switch114']`
* Checking route from one node to multiple nodes:
`# nodel2traceroute n244 n1833,n1851`
`n244 to n1833: ['switch114', 'switch7', 'switch32', 'switch253', 'switch85', 'switch72', 'switch21', 'switch2', 'switch96', 'switch103', 'switch115']
n244 to n1851: ['switch114']`

View File

@@ -19,7 +19,9 @@ interval of 1 second is used.
## OPTIONS
* `-c`, `--csv`:
Organize output into CSV format, one sensor per column.
Organize output into CSV format, one sensor per column. Note that while normally nodesensors reports
sensors in order as returned by server, CSV output enforces consistency by sorting after receiving
the results, which may have a different ordering than non-CSV usage of nodesensors.
* `-i`, `--interval`=**SECONDS**:
Repeat data gathering, waiting the specified time between samples. Unless `-n` is

View File

@@ -0,0 +1,74 @@
#!/usr/bin/python3
import glob
import gzip
import base64
import os
import subprocess
import sys
import tempfile
def collect_certificates(tmpdir):
    """Concatenate the contents of every *.pem file directly under tmpdir.

    Returns the joined text; empty string when no .pem files are present.
    """
    chunks = []
    for pemfile in glob.glob(f'{tmpdir}/*.pem'):
        with open(pemfile, 'r') as pemin:
            chunks.append(pemin.read())
    return ''.join(chunks)
def embed_certificates(incfg, certdata):
    """Substitute certdata for the %CONFLUENTCERTCOLL% placeholder in incfg.

    Raises Exception when certdata is empty, since an image without CA
    certificates cannot authenticate the deployment server.
    """
    if not certdata:
        raise Exception('No certificates found to embed')
    return incfg.replace('%CONFLUENTCERTCOLL%', certdata)
def embed_identity(incfg, identityjson):
    """Substitute identityjson for the %IDENTJSON% placeholder in incfg."""
    return incfg.replace('%IDENTJSON%', identityjson)
def embed_apiclient(incfg, apiclient):
    """Read the apiclient script, gzip + base64 it, and substitute the
    result for the %APICLIENTZ64% placeholder in incfg."""
    with open(apiclient, 'r') as clientin:
        payload = clientin.read()
    token = base64.b64encode(gzip.compress(payload.encode())).decode()
    return incfg.replace('%APICLIENTZ64%', token)
def embed_data(tmpdir, outfile):
    """Render tmpdir/bfb.cfg.template into outfile.

    Fills in the CA certificate collection and identity.json from tmpdir,
    and the apiclient script from the parent directory of tmpdir.
    """
    with open(f'{tmpdir}/bfb.cfg.template', 'r') as tmplin:
        rendered = tmplin.read()
    rendered = embed_certificates(rendered, collect_certificates(tmpdir))
    with open(f'{tmpdir}/identity.json', 'r') as identin:
        rendered = embed_identity(rendered, identin.read())
    # apiclient lives one level above the per-node staging directory
    rendered = embed_apiclient(rendered, f'{tmpdir}/../apiclient')
    with open(outfile, 'w') as cfgout:
        cfgout.write(rendered)
def get_identity_json(node):
    """Return the node's identity JSON text, or None if no identity file exists."""
    path = f'/var/lib/confluent/private/site/identity_files/{node}.json'
    try:
        with open(path, 'r') as identin:
            return identin.read()
    except FileNotFoundError:
        return None
if __name__ == '__main__':
    # CLI entry: render the per-node bfb config and hand it to bfb-install
    # against the given rshim device.
    if len(sys.argv) != 4:
        print("Usage: bfb-autoinstall <node> <bfbfile> <rshim>")
        sys.exit(1)
    node = sys.argv[1]
    bfbfile = sys.argv[2]
    rshim = sys.argv[3]
    # Work relative to this script's directory; per-node staging data is
    # expected in a sibling directory named after the node (./<node>/).
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    currdir = os.getcwd()
    tempdir = tempfile.mkdtemp(prefix=f'bfb-autoinstall-{node}-')
    embed_data(f'{currdir}/{node}', f'{tempdir}/bfb.cfg')
    # NOTE(review): tempdir is never cleaned up here -- confirm intentional.
    subprocess.check_call(['bfb-install', '-b', bfbfile, '-c', f'{tempdir}/bfb.cfg', '-r', rshim])

View File

@@ -0,0 +1,74 @@
#!/usr/bin/python3
import glob
import gzip
import base64
import os
import subprocess
import sys
import tempfile
def collect_certificates(tmpdir):
    # Concatenate the text of every *.pem file directly under tmpdir.
    certdata = ''
    for cacert in glob.glob(f'{tmpdir}/*.pem'):
        with open(cacert, 'r') as f:
            certdata += f.read()
    return certdata
def embed_certificates(incfg, certdata):
    # Substitute the CA bundle for %CONFLUENTCERTCOLL%; refuse an empty bundle.
    if not certdata:
        raise Exception('No certificates found to embed')
    incfg = incfg.replace('%CONFLUENTCERTCOLL%', certdata)
    return incfg
def embed_identity(incfg, identityjson):
    # Substitute the node identity JSON for %IDENTJSON%.
    incfg = incfg.replace('%IDENTJSON%', identityjson)
    return incfg
def embed_apiclient(incfg, apiclient):
    # gzip + base64 the apiclient script and substitute it for %APICLIENTZ64%.
    with open(apiclient, 'r') as f:
        apiclientdata = f.read()
    compressed = gzip.compress(apiclientdata.encode())
    encoded = base64.b64encode(compressed).decode()
    incfg = incfg.replace('%APICLIENTZ64%', encoded)
    return incfg
def embed_data(tmpdir, outfile):
    # Render tmpdir/bfb.cfg.template into outfile with certificates,
    # identity JSON, and the apiclient from the parent of tmpdir.
    templatefile = f'{tmpdir}/bfb.cfg.template'
    with open(templatefile, 'r') as f:
        incfg = f.read()
    certdata = collect_certificates(tmpdir)
    incfg = embed_certificates(incfg, certdata)
    with open(f'{tmpdir}/identity.json', 'r') as f:
        identityjson = f.read()
    incfg = embed_identity(incfg, identityjson)
    incfg = embed_apiclient(incfg, f'{tmpdir}/../apiclient')
    with open(outfile, 'w') as f:
        f.write(incfg)
def get_identity_json(node):
    # Return the node's identity JSON text, or None when the file is absent.
    identity_file = f'/var/lib/confluent/private/site/identity_files/{node}.json'
    try:
        with open(identity_file, 'r') as f:
            return f.read()
    except FileNotFoundError:
        return None
if __name__ == '__main__':
    # CLI: render per-node config, then hand it to bfb-install for the rshim device.
    if len(sys.argv) != 4:
        print("Usage: bfb-autoinstall <node> <bfbfile> <rshim>")
        sys.exit(1)
    node = sys.argv[1]
    bfbfile = sys.argv[2]
    rshim = sys.argv[3]
    # Per-node staging data is expected beside this script in ./<node>/
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    currdir = os.getcwd()
    tempdir = tempfile.mkdtemp(prefix=f'bfb-autoinstall-{node}-')
    embed_data(f'{currdir}/{node}', f'{tempdir}/bfb.cfg')
    subprocess.check_call(['bfb-install', '-b', bfbfile, '-c', f'{tempdir}/bfb.cfg', '-r', rshim])

View File

@@ -0,0 +1,76 @@
# Hook run during BFB image customization: installs a confluent bootstrap
# script plus a systemd unit into the mounted target image at /mnt, so the
# node registers with confluent and runs post/firstboot scripts on first boot.
function bfb_modify_os() {
# Lock the stock ubuntu account until a real password is provisioned below.
echo 'ubuntu:!' | chpasswd -e
mkdir -p /mnt/opt/confluent/bin/
# Everything between the END_OF_EMBED markers is written verbatim into the
# image; the %CONFLUENTCERTCOLL%/%APICLIENTZ64%/%IDENTJSON% placeholders are
# assumed to have been filled in by the bfb.cfg templating beforehand.
cat > /mnt/opt/confluent/bin/confluentbootstrap.sh << 'END_OF_EMBED'
#!/bin/bash
cat > /usr/local/share/ca-certificates/confluent.crt << 'END_OF_CERTS'
%CONFLUENTCERTCOLL%
END_OF_CERTS
update-ca-certificates
mkdir -p /opt/confluent/bin /etc/confluent/
cp /usr/local/share/ca-certificates/confluent.crt /etc/confluent/ca.pem
cat > /opt/confluent/bin/apiclient.gz.b64 << 'END_OF_CLIENT'
%APICLIENTZ64%
END_OF_CLIENT
base64 -d /opt/confluent/bin/apiclient.gz.b64 | gunzip > /opt/confluent/bin/apiclient
cat > /etc/confluent/ident.json << 'END_OF_IDENT'
%IDENTJSON%
END_OF_IDENT
python3 /opt/confluent/bin/apiclient -i /etc/confluent/ident.json /confluent-api/self/deploycfg2 > /etc/confluent/confluent.deploycfg
PROFILE=$(grep ^profile: /etc/confluent/confluent.deploycfg |awk '{print $2}')
ROOTPASS=$(grep ^rootpassword: /etc/confluent/confluent.deploycfg | awk '{print $2}'|grep -v null)
if [ -n "$ROOTPASS" ]; then
echo root:$ROOTPASS | chpasswd -e
echo "ubuntu:$ROOTPASS" | chpasswd -e
else
echo 'ubuntu:!' | chpasswd -e
fi
cntmp=$(mktemp -d)
cd "$cntmp" || { echo "Failed to cd to temporary directory $cntmp"; exit 1; }
touch /etc/confluent/confluent.deploycfg
python3 /opt/confluent/bin/apiclient /confluent-public/os/$PROFILE/scripts/confignet > confignet
python3 confignet
cd -
rm -rf "$cntmp"
python3 /opt/confluent/bin/apiclient /confluent-public/os/$PROFILE/scripts/functions > /etc/confluent/functions
bash /etc/confluent/functions run_remote setupssh
for cert in /etc/ssh/ssh*-cert.pub; do
if [ -s $cert ]; then
echo HostCertificate $cert >> /etc/ssh/sshd_config.d/90-confluent.conf
fi
done
mkdir -p /var/log/confluent
chmod 700 /var/log/confluent
touch /var/log/confluent/confluent-firstboot.log
touch /var/log/confluent/confluent-post.log
chmod 600 /var/log/confluent/confluent-post.log
chmod 600 /var/log/confluent/confluent-firstboot.log
exec >> /var/log/confluent/confluent-post.log
exec 2>> /var/log/confluent/confluent-post.log
bash /etc/confluent/functions run_remote_python syncfileclient
bash /etc/confluent/functions run_remote_parts post.d
bash /etc/confluent/functions run_remote_config post.d
exec >> /var/log/confluent/confluent-firstboot.log
exec 2>> /var/log/confluent/confluent-firstboot.log
bash /etc/confluent/functions run_remote_parts firstboot.d
bash /etc/confluent/functions run_remote_config firstboot.d
python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: staged'
python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: complete'
systemctl disable confluentbootstrap
rm /etc/systemd/system/confluentbootstrap.service
END_OF_EMBED
chmod +x /mnt/opt/confluent/bin/confluentbootstrap.sh
# One-time unit that runs the bootstrap on first boot once network is online;
# the bootstrap script disables and removes the unit itself when done.
cat > /mnt/etc/systemd/system/confluentbootstrap.service << EOS
[Unit]
Description=First Boot Process
Requires=network-online.target
After=network-online.target
[Service]
ExecStart=/opt/confluent/bin/confluentbootstrap.sh
[Install]
WantedBy=multi-user.target
EOS
chroot /mnt systemctl enable confluentbootstrap
}

View File

@@ -0,0 +1,125 @@
#!/usr/bin/python3
import os
import sys
import tempfile
import glob
import shutil
import shlex
import subprocess
import select
sys.path.append('/opt/lib/confluent/python')
import confluent.sortutil as sortutil
import confluent.client as client
def prep_outdir(node):
    """Stage per-node deployment files into a fresh temporary directory.

    Copies the site TLS CA certificates, asks confetty to (re)create the
    node's identity image, then copies the node's identity JSON in as
    identity.json. Returns the temp directory path; the caller is
    responsible for cleanup.
    """
    tmpdir = tempfile.mkdtemp()
    for certfile in glob.glob('/var/lib/confluent/public/site/tls/*.pem'):
        basename = os.path.basename(certfile)
        destfile = os.path.join(tmpdir, basename)
        shutil.copy2(certfile, destfile)
    # Trigger generation of the identity file before copying it below
    subprocess.check_call(shlex.split(f'confetty set /nodes/{node}/deployment/ident_image=create'))
    # NOTE(review): bfb-autoinstall reads private/site/identity_files while
    # this reads private/identity_files -- confirm path consistency.
    shutil.copy2(f'/var/lib/confluent/private/identity_files/{node}.json', os.path.join(tmpdir, 'identity.json'))
    return tmpdir
def exec_bfb_install(host, nodetorshim, bfbfile, installprocs, pipedesc, all, poller):
    """Push the BFB image and helper scripts to host, then start one
    bfb-autoinstall process per node against its rshim device.

    Each child's stdout/stderr pipes are registered with poller via
    run_cmdv so the main loop can multiplex output.
    installprocs is accepted but not used here -- NOTE(review): confirm
    whether callers rely on it being populated.
    """
    # Remote scratch dir holding the image, scripts, and per-node staging data
    remotedir = subprocess.check_output(shlex.split(f'ssh {host} mktemp -d /tmp/bfb.XXXXXX')).decode().strip()
    bfbbasename = os.path.basename(bfbfile)
    subprocess.check_call(shlex.split(f'rsync -avz --info=progress2 {bfbfile} {host}:{remotedir}/{bfbbasename}'))
    subprocess.check_call(shlex.split(f'rsync -avc --info=progress2 /opt/lib/confluent/osdeploy/bluefield/hostscripts/ {host}:{remotedir}/'))
    for node in nodetorshim:
        rshim = nodetorshim[node]
        nodeoutdir = prep_outdir(node)
        # nodeattrib output is 'node: attribute: value'; keep only the value
        nodeprofile = subprocess.check_output(shlex.split(f'nodeattrib {node} deployment.pendingprofile')).decode().strip().split(':', 2)[2].strip()
        shutil.copy2(f'/var/lib/confluent/public/os/{nodeprofile}/bfb.cfg.template', os.path.join(nodeoutdir, 'bfb.cfg.template'))
        subprocess.check_call(shlex.split(f'rsync -avz {nodeoutdir}/ {host}:{remotedir}/{node}/'))
        shutil.rmtree(nodeoutdir)
        run_cmdv(node, shlex.split(f'ssh {host} sh /etc/confluent/functions confluentpython {remotedir}/bfb-autoinstall {node} {remotedir}/{bfbbasename} {rshim}'), all, poller, pipedesc)
def run_cmdv(node, cmdv, all, poller, pipedesc):
    """Launch cmdv for node and register its stdout/stderr pipes.

    Both pipes are added to the `all` set and registered with `poller`
    (EPOLLIN); `pipedesc` maps each pipe's fd to node/process metadata.
    If the executable is missing (ENOENT) a message is written to stderr
    and nothing is registered; other OSErrors propagate.
    """
    try:
        proc = subprocess.Popen(
            cmdv, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except OSError as err:
        if err.errno != 2:
            raise
        sys.stderr.write('{0}: Unable to find local executable file "{1}"\n'.format(node, cmdv[0]))
        return
    # stdout first, then stderr, matching registration order expected by callers
    for streamname in ('stdout', 'stderr'):
        stream = getattr(proc, streamname)
        pipedesc[stream.fileno()] = {'node': node, 'popen': proc,
                                     'type': streamname, 'file': stream}
        all.add(stream)
        poller.register(stream, select.EPOLLIN)
if __name__ == '__main__':
    # Usage: <host> <bfbfile> <node1:rshim1> [<node2:rshim2> ...]
    if len(sys.argv) < 3:
        print(f'Usage: {sys.argv[0]} <host> <bfbfile> <node1:rshim1> [<node2:rshim2> ...]')
        sys.exit(1)
    host = sys.argv[1]
    bfbfile = sys.argv[2]
    nodetorshim = {}
    for arg in sys.argv[3:]:
        node, rshim = arg.split(':')
        nodetorshim[node] = rshim
    installprocs = {}
    pipedesc = {}
    all = set()
    poller = select.epoll()
    exec_bfb_install(host, nodetorshim, bfbfile, installprocs, pipedesc, all, poller)
    rdy = poller.poll(10)
    pendingexecs = []
    exitcode = 0
    # Multiplex output from all child processes until every pipe is closed.
    while all:
        pernodeout = {}
        for r in rdy:
            r = r[0]
            desc = pipedesc[r]
            r = desc['file']
            node = desc['node']
            data = True
            # Per-fd poller: drain only what is ready without blocking in readline
            singlepoller = select.epoll()
            singlepoller.register(r, select.EPOLLIN)
            while data and singlepoller.poll(0):
                data = r.readline()
                if data:
                    if desc['type'] == 'stdout':
                        # Buffer stdout so it can be printed grouped and sorted by node
                        if node not in pernodeout:
                            pernodeout[node] = []
                        pernodeout[node].append(data)
                    else:
                        # stderr is passed through immediately, prefixed by node
                        data = client.stringify(data)
                        sys.stderr.write('{0}: {1}'.format(node, data))
                        sys.stderr.flush()
                else:
                    # EOF on this pipe; if the process has exited, retire the fd
                    pop = desc['popen']
                    ret = pop.poll()
                    if ret is not None:
                        exitcode = exitcode | ret
                        all.discard(r)
                        poller.unregister(r)
                        r.close()
                        if desc['type'] == 'stdout' and pendingexecs:
                            # Bug fix: pendingexecs is a plain list, which has no
                            # popleft(); use pop(0) for the same FIFO behavior.
                            node, cmdv = pendingexecs.pop(0)
                            run_cmdv(node, cmdv, all, poller, pipedesc)
            singlepoller.close()
        # Emit buffered stdout in natural node order for readable grouping
        for node in sortutil.natural_sort(pernodeout):
            for line in pernodeout[node]:
                line = client.stringify(line)
                sys.stdout.write('{0}: {1}'.format(node, line))
                sys.stdout.flush()
        if all:
            rdy = poller.poll(10)

View File

@@ -3,6 +3,7 @@ try:
import http.client as client
except ImportError:
import httplib as client
import base64
import ctypes
import ctypes.util
import glob
@@ -14,6 +15,14 @@ import ssl
import sys
import struct
import time
import re
import hashlib
try:
import json
import hmac
except ImportError:
json = None
hmac = None
class InvalidApiKey(Exception):
pass
@@ -38,40 +47,115 @@ c_crypt.restype = ctypes.c_char_p
def get_my_addresses():
nlhdrsz = struct.calcsize('IHHII')
ifaddrsz = struct.calcsize('BBBBI')
# RTM_GETADDR = 22
# nlmsghdr struct: u32 len, u16 type, u16 flags, u32 seq, u32 pid
nlhdr = struct.pack('IHHII', nlhdrsz + ifaddrsz, 22, 0x301, 0, 0)
# ifaddrmsg struct: u8 family, u8 prefixlen, u8 flags, u8 scope, u32 index
ifaddrmsg = struct.pack('BBBBI', 0, 0, 0, 0, 0)
s = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, socket.NETLINK_ROUTE)
s.bind((0, 0))
s.sendall(nlhdr + ifaddrmsg)
addrs = []
while True:
pdata = s.recv(65536)
v = memoryview(pdata)
if struct.unpack('H', v[4:6])[0] == 3: # netlink done message
break
while len(v):
length, typ = struct.unpack('IH', v[:6])
if typ == 20:
fam, plen, _, scope, ridx = struct.unpack('BBBBI', v[nlhdrsz:nlhdrsz+ifaddrsz])
if scope in (253, 0):
rta = v[nlhdrsz+ifaddrsz:length]
while len(rta):
rtalen, rtatyp = struct.unpack('HH', rta[:4])
if rtalen < 4:
break
if rtatyp == 1:
addrs.append((fam, rta[4:rtalen], plen, ridx))
rta = rta[msg_align(rtalen):]
v = v[msg_align(length):]
for ifa in get_ifaddrs():
if ifa[0] == 'ip':
addrs.append((ifa[1], ifa[2], ifa[3]))
return addrs
def get_mac_addresses():
    """Return (interface_name, mac_address) tuples for the link-layer
    ('ETHER') entries reported by get_ifaddrs()."""
    return [(entry[1], entry[2]) for entry in get_ifaddrs() if entry[0] == 'ETHER']
def scan_confluents():
def get_ifaddrs():
    """Enumerate local network addresses via libc getifaddrs() using ctypes.

    Returns a list of tuples of two shapes:
      ('ip', family, raw_address_bytes, index) for AF_INET/AF_INET6 entries;
        index is the interface index for IPv4 and the scope id for IPv6.
      ('ETHER', ifname, 'aa:bb:cc:...') for AF_PACKET (link-layer) entries.
    Loopback and non-multicast interfaces are skipped for IP entries; locally
    administered MACs are skipped for Ethernet entries. Returns [] if the
    getifaddrs() call fails.
    """
    # ctypes mirrors of the C socket address structures
    class sockaddr(ctypes.Structure):
        _fields_ = [
            ('sa_family', ctypes.c_uint16),
            ('sa_data', ctypes.c_ubyte * 14),
        ]
    class sockaddr_in(ctypes.Structure):
        _fields_ = [
            ('sin_family', ctypes.c_uint16),
            ('sin_port', ctypes.c_uint16),
            ('sin_addr', ctypes.c_ubyte * 4),
            ('sin_zero', ctypes.c_ubyte * 8),
        ]
    class sockaddr_in6(ctypes.Structure):
        _fields_ = [
            ('sin6_family', ctypes.c_uint16),
            ('sin6_port', ctypes.c_uint16),
            ('sin6_flowinfo', ctypes.c_uint32),
            ('sin6_addr', ctypes.c_ubyte * 16),
            ('sin6_scope_id', ctypes.c_uint32),
        ]
    class sockaddr_ll(ctypes.Structure):
        _fields_ = [
            ('sll_family', ctypes.c_uint16),
            ('sll_protocol', ctypes.c_uint16),
            ('sll_ifindex', ctypes.c_int32),
            ('sll_hatype', ctypes.c_uint16),
            ('sll_pkttype', ctypes.c_uint8),
            ('sll_halen', ctypes.c_uint8),
            ('sll_addr', ctypes.c_ubyte * 8),
        ]
    # ifaddrs is self-referential (ifa_next), so declare then assign _fields_
    class ifaddrs(ctypes.Structure):
        pass
    ifaddrs._fields_ = [
        ('ifa_next', ctypes.POINTER(ifaddrs)),
        ('ifa_name', ctypes.c_char_p),
        ('ifa_flags', ctypes.c_uint),
        ('ifa_addr', ctypes.POINTER(sockaddr)),
        ('ifa_netmask', ctypes.POINTER(sockaddr)),
        ('ifa_ifu', ctypes.POINTER(sockaddr)),
        ('ifa_data', ctypes.c_void_p),
    ]
    libc = ctypes.CDLL(ctypes.util.find_library('c'))
    libc.getifaddrs.argtypes = [ctypes.POINTER(ctypes.POINTER(ifaddrs))]
    libc.getifaddrs.restype = ctypes.c_int
    libc.freeifaddrs.argtypes = [ctypes.POINTER(ifaddrs)]
    libc.freeifaddrs.restype = None
    ifap = ctypes.POINTER(ifaddrs)()
    result = libc.getifaddrs(ctypes.pointer(ifap))
    if result != 0:
        # getifaddrs failed; report no addresses rather than raising
        return []
    addresses = []
    ifa = ifap
    try:
        # Walk the singly linked list returned by getifaddrs
        while ifa:
            if ifa.contents.ifa_addr:
                family = ifa.contents.ifa_addr.contents.sa_family
                name = ifa.contents.ifa_name.decode('utf-8') if ifa.contents.ifa_name else None
                if family in (socket.AF_INET, socket.AF_INET6):
                    # skip loopback and non-multicast interfaces
                    # (flag 0x8 = IFF_LOOPBACK, 0x1000 = IFF_MULTICAST)
                    if ifa.contents.ifa_flags & 8 or not ifa.contents.ifa_flags & 0x1000:
                        ifa = ifa.contents.ifa_next
                        continue
                    if family == socket.AF_INET:
                        addr_ptr = ctypes.cast(ifa.contents.ifa_addr, ctypes.POINTER(sockaddr_in))
                        addr_bytes = bytes(addr_ptr.contents.sin_addr)
                        if_index = socket.if_nametoindex(name) if name else 0
                        addresses.append(('ip', family, addr_bytes, if_index))
                    elif family == socket.AF_INET6:
                        addr_ptr = ctypes.cast(ifa.contents.ifa_addr, ctypes.POINTER(sockaddr_in6))
                        addr_bytes = bytes(addr_ptr.contents.sin6_addr)
                        scope_id = addr_ptr.contents.sin6_scope_id
                        addresses.append(('ip', family, addr_bytes, scope_id))
                elif family == socket.AF_PACKET:
                    addr_ptr = ctypes.cast(ifa.contents.ifa_addr, ctypes.POINTER(sockaddr_ll))
                    halen = addr_ptr.contents.sll_halen
                    if addr_ptr.contents.sll_hatype in (1, 32) and halen > 0: # ARPHRD_ETHER or ARPHRD_INFINIBAND
                        if addr_ptr.contents.sll_hatype == 1 and addr_ptr.contents.sll_addr[0] & 2: # skip locally administered MACs
                            ifa = ifa.contents.ifa_next
                            continue
                        mac_bytes = bytes(addr_ptr.contents.sll_addr[:halen])
                        macaddr = ':'.join('{:02x}'.format(b) for b in mac_bytes)
                        addresses.append(('ETHER', name, macaddr))
            ifa = ifa.contents.ifa_next
    finally:
        # Always release the C-allocated list, even if parsing raises
        libc.freeifaddrs(ifap)
    return addresses
def scan_confluents(confuuid=None):
srvs = {}
s6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
@@ -83,18 +167,24 @@ def scan_confluents():
s4.bind(('0.0.0.0', 1900))
doneidxs = set([])
msg = 'M-SEARCH * HTTP/1.1\r\nST: urn:xcat.org:service:confluent:'
with open('/etc/confluent/confluent.deploycfg') as dcfg:
for line in dcfg.read().split('\n'):
if line.startswith('confluent_uuid:'):
confluentuuid = line.split(': ')[1]
msg += '/confluentuuid=' + confluentuuid
break
with open('/sys/devices/virtual/dmi/id/product_uuid') as uuidin:
msg += '/uuid=' + uuidin.read().strip()
for addrf in glob.glob('/sys/class/net/*/address'):
with open(addrf) as addrin:
hwaddr = addrin.read().strip()
msg += '/mac=' + hwaddr
if not confuuid and os.path.exists('/etc/confluent/confluent.deploycfg'):
with open('/etc/confluent/confluent.deploycfg') as dcfg:
for line in dcfg.read().split('\n'):
if line.startswith('confluent_uuid:'):
confluentuuid = line.split(': ')[1]
msg += '/confluentuuid=' + confluentuuid
break
if not confuuid and os.path.exists('/confluent_uuid'):
with open('/confluent_uuid') as cuuidin:
confluentuuid = cuuidin.read().strip()
msg += '/confluentuuid=' + confluentuuid
try:
with open('/sys/devices/virtual/dmi/id/product_uuid') as uuidin:
msg += '/uuid=' + uuidin.read().strip()
except Exception:
pass
for iface, hwaddr in get_mac_addresses():
msg += '/mac=' + hwaddr
msg = msg.encode('utf8')
for addr in get_my_addresses():
if addr[0] == socket.AF_INET6:
@@ -122,6 +212,7 @@ def scan_confluents():
srvlist = []
if r:
r = r[0]
nodename = None
while r:
for s in r:
(rsp, peer) = s.recvfrom(9000)
@@ -129,6 +220,7 @@ def scan_confluents():
current = None
for line in rsp:
if line.startswith(b'NODENAME: '):
nodename = line.replace(b'NODENAME: ', b'').strip().decode('utf8')
current = {}
elif line.startswith(b'DEFAULTNET: 1'):
current['isdefault'] = True
@@ -140,20 +232,37 @@ def scan_confluents():
if currip.startswith('fe80::') and '%' not in currip:
currip = '{0}%{1}'.format(currip, peer[-1])
srvs[currip] = current
srvlist.append(currip)
if currip not in srvlist:
srvlist.append(currip)
r = select.select((s4, s6), (), (), 2)
if r:
r = r[0]
if not os.path.exists('/etc/confluent/confluent.info'):
with open('/etc/confluent/confluent.info', 'w+') as cinfo:
if nodename:
cinfo.write('NODENAME: {0}\n'.format(nodename))
for srv in srvlist:
cinfo.write('MANAGER: {0}\n'.format(srv))
return srvlist, srvs
def get_net_apikey(nodename, mgr):
def get_net_apikey(nodename, mgr, hmackey=None, confuuid=None):
alpha = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789./'
newpass = ''.join([alpha[x >> 2] for x in bytearray(os.urandom(32))])
salt = '$5$' + ''.join([alpha[x >> 2] for x in bytearray(os.urandom(8))])
newpass = newpass.encode('utf8')
salt = salt.encode('utf8')
crypted = c_crypt(newpass, salt)
if hmackey:
hmacvalue = hmac.new(hmackey.encode('utf8'), crypted, hashlib.sha256).digest()
hmacvalue = base64.b64encode(hmacvalue).decode('utf8')
client = HTTPSClient(host=mgr, phmac=hmacvalue, nodename=nodename, confuuid=confuuid)
try:
status, rsp = client.grab_url_with_status('/confluent-api/self/registerapikey', data=crypted, returnrsp=True)
if status == 200:
return newpass.decode('utf8')
except Exception:
pass
for addrinfo in socket.getaddrinfo(mgr, 13001, 0, socket.SOCK_STREAM):
try:
clisock = socket.socket(addrinfo[0], addrinfo[1])
@@ -191,7 +300,7 @@ def get_net_apikey(nodename, mgr):
return ''
def get_apikey(nodename, hosts, errout=None):
def get_apikey(nodename, hosts, errout=None, hmackey=None, confuuid=None):
apikey = ""
if os.path.exists('/etc/confluent/confluent.apikey'):
apikey = open('/etc/confluent/confluent.apikey').read().strip()
@@ -200,16 +309,16 @@ def get_apikey(nodename, hosts, errout=None):
while not apikey:
for host in hosts:
try:
apikey = get_net_apikey(nodename, host)
apikey = get_net_apikey(nodename, host, hmackey=hmackey, confuuid=confuuid)
except OSError:
apikey = None
if apikey:
break
else:
srvlist, _ = scan_confluents()
srvlist, _ = scan_confluents(confuuid=confuuid)
for host in srvlist:
try:
apikey = get_net_apikey(nodename, host)
apikey = get_net_apikey(nodename, host, hmackey=hmackey, confuuid=confuuid)
except OSError:
apikey = None
if apikey:
@@ -227,34 +336,43 @@ def get_apikey(nodename, hosts, errout=None):
return apikey
class HTTPSClient(client.HTTPConnection, object):
def __init__(self, usejson=False, port=443, host=None, errout=None, phmac=None, checkonly=False):
def __init__(self, usejson=False, port=443, host=None, errout=None, phmac=None, checkonly=False, hmackey=None, nodename=None, confuuid=None):
self.ignorehosts = set([])
self.phmac = phmac
self.hmackey = hmackey
self.confuuid = confuuid
self.errout = None
self.stdheaders = {}
if nodename:
self.stdheaders['CONFLUENT_NODENAME'] = nodename
if errout:
self.errout = open(errout, 'w')
self.errout.flush()
self.stdheaders = {}
mgtiface = None
if usejson:
self.stdheaders['ACCEPT'] = 'application/json'
if host:
self.hosts = [host]
with open('/etc/confluent/confluent.info') as cinfo:
info = cinfo.read().split('\n')
for line in info:
if line.startswith('NODENAME:'):
node = line.split(' ')[1]
self.stdheaders['CONFLUENT_NODENAME'] = node
if not nodename:
with open('/etc/confluent/confluent.info') as cinfo:
info = cinfo.read().split('\n')
for line in info:
if line.startswith('NODENAME:'):
nodename = line.split(' ')[1]
self.stdheaders['CONFLUENT_NODENAME'] = nodename
else:
self.hosts = []
info = open('/etc/confluent/confluent.info').read().split('\n')
try:
info = open('/etc/confluent/confluent.info').read().split('\n')
except Exception:
info = []
havedefault = '0'
plainhost = ''
for line in info:
host = ''
if line.startswith('NODENAME:'):
node = line.split(' ')[1]
self.stdheaders['CONFLUENT_NODENAME'] = node
nodename = line.split(' ')[1]
self.stdheaders['CONFLUENT_NODENAME'] = nodename
if line.startswith('MANAGER:') and not host:
host = line.split(' ')[1]
self.hosts.append(host)
@@ -289,15 +407,14 @@ class HTTPSClient(client.HTTPConnection, object):
if plainhost and not self.hosts:
self.hosts.append(plainhost)
if self.phmac:
with open(phmac, 'r') as hmacin:
self.stdheaders['CONFLUENT_CRYPTHMAC'] = hmacin.read()
self.stdheaders['CONFLUENT_CRYPTHMAC'] = self.phmac
elif not checkonly:
self.stdheaders['CONFLUENT_APIKEY'] = get_apikey(node, self.hosts, errout=self.errout)
self.stdheaders['CONFLUENT_APIKEY'] = get_apikey(nodename, self.hosts, errout=self.errout, hmackey=hmackey, confuuid=self.confuuid)
if mgtiface:
self.stdheaders['CONFLUENT_MGTIFACE'] = mgtiface
self.port = port
self.host = None
self.node = node
self.node = nodename
host = self.check_connections()
client.HTTPConnection.__init__(self, host, port)
self.connect()
@@ -315,6 +432,8 @@ class HTTPSClient(client.HTTPConnection, object):
ctx.check_hostname = True
for timeo in (0.1, 5):
for host in hosts:
if host in self.ignorehosts:
continue
try:
addrinf = socket.getaddrinfo(host, self.port)[0]
psock = socket.socket(addrinf[0])
@@ -335,7 +454,7 @@ class HTTPSClient(client.HTTPConnection, object):
continue
break
if not foundsrv:
srvlist, srvs = scan_confluents()
srvlist, srvs = scan_confluents(self.confuuid)
hosts = []
for srv in srvlist:
if srvs[srv].get('isdefault', False):
@@ -409,11 +528,101 @@ class HTTPSClient(client.HTTPConnection, object):
with open('/etc/confluent/confluent.apikey', 'w+') as akfile:
akfile.write('')
self.stdheaders['CONFLUENT_APIKEY'] = get_apikey(
self.node, [self.host], errout=self.errout)
self.node, [self.host], errout=self.errout, hmackey=self.hmackey, confuuid=self.confuuid)
if rsp.status == 503: # confluent is down, but the server running confluent is otherwise up
authed = False
self.ignorehosts.add(self.host)
host = self.check_connections()
client.HTTPConnection.__init__(self, host, self.port)
raise Exception(rsp.read())
def get_current_vmnic_vswitch():
    """Return (vswitch_name, vmnic) for the first standard-vswitch uplink
    that is not the USB side-band NIC (vusb0).

    Parses `localcli network vswitch standard list` output, tracking each
    vswitch's `Name:` line and the `Uplinks:` lines that follow it.
    Returns as soon as an uplink not containing 'vusb0' is seen; otherwise
    returns the last (vswitch, uplink) pair encountered.  Either element
    may be None if its field never appeared in the output.

    NOTE(review): `Uplinks:` can list several NICs; the whole string is
    returned as `vmnic` -- confirm callers handle a multi-NIC value.
    """
    uplinkmatch = re.compile(r'^\s*Uplinks:\s*(.*)')
    switchmatch = re.compile(r'^\s*Name:\s*(.*)')
    vswinfo = subprocess.check_output(['localcli', 'network', 'vswitch', 'standard', 'list']).decode()
    vmnic = None
    vswitch_name = None
    for info in vswinfo.split('\n'):
        name_match = switchmatch.match(info)
        if name_match:
            # Track the most recently seen vswitch; its Uplinks: lines follow it.
            vswitch_name = name_match.group(1).strip()
        upinfo = uplinkmatch.match(info)
        if upinfo:
            vmnic = upinfo.group(1).strip()
            # Prefer a real uplink over the virtual USB (BMC side-band) NIC.
            if vmnic and 'vusb0' not in vmnic:
                return vswitch_name, vmnic
    return vswitch_name, vmnic
def get_available_nics():
    """Return a dict mapping NIC name -> link status string (e.g. 'Up').

    Parses the table printed by `localcli network nic list`.  Header rows
    are skipped until the dashed separator line is seen; data rows are
    split on runs of two-or-more spaces, with the "Link Status" column
    taken from field position 5.
    """
    nicinfo = subprocess.check_output(['localcli', 'network', 'nic', 'list']).decode('utf8').split('\n')
    available_nics = {}
    # Skip headers and separators
    parsing_started = False
    for line in nicinfo:
        if re.match(r'^-+', line):
            # The dashed rule marks the end of the header block.
            parsing_started = True
            continue
        if not parsing_started or not line.strip():
            continue
        parts = re.split(r'\s{2,}', line.strip())
        if len(parts) >= 5:
            nic_name = parts[0]
            nic_status = parts[4] # "Link Status" is the 5th field
            available_nics[nic_name] = nic_status
    return available_nics
def is_esxi():
    """Detect whether this system is an ESXi host.

    Returns:
        bool: True when the VMware configuration directory is present.
    """
    vmware_cfg_dir = "/etc/vmware"
    if os.path.isdir(vmware_cfg_dir):
        return True
    return False
def fix_vswitch():
    """On ESXi, ensure the standard vswitch has a link-up uplink.

    Polls the current vswitch/uplink pair: if the current uplink reports
    link 'Up', nothing to do.  Otherwise swap in the first NIC whose link
    is 'Up'.  When no NIC is up at all, keep waiting, giving up after
    roughly 300 seconds.  No-op on non-ESXi systems.

    Raises:
        RuntimeError: if no vswitch or no uplink can be determined.
    """
    if is_esxi():
        start_time = time.time()
        while True:
            current_vswitch, current_vmnic = get_current_vmnic_vswitch()
            if current_vswitch is None:
                raise RuntimeError("Panic: current vswitch is None")
            if current_vmnic is None:
                raise RuntimeError("Panic: current vmnic is None")
            available_nics = get_available_nics()
            if current_vmnic and available_nics.get(current_vmnic) == 'Up':
                # Current uplink has link; nothing to fix.
                break
            # First NIC reporting link 'Up', if any.
            new_vmnic = next((nic for nic, status in available_nics.items() if status == 'Up'), None)
            if new_vmnic and new_vmnic != current_vmnic:
                # Replace the link-down uplink with the live one.
                subprocess.check_call(['localcli', 'network', 'vswitch', 'standard', 'uplink', 'remove',
                    '--uplink-name', current_vmnic, '--vswitch-name', current_vswitch])
                subprocess.check_call(['localcli', 'network', 'vswitch', 'standard', 'uplink', 'add',
                    '--uplink-name', new_vmnic, '--vswitch-name', current_vswitch])
            elif not new_vmnic:
                # Nothing is up yet; bounded wait (~300s) before giving up.
                if time.time() - start_time > 300:
                    break
                time.sleep(5)
            # Re-poll after a short delay so the change can settle.
            time.sleep(5)
if __name__ == '__main__':
data = None
if '-f' in sys.argv:
try:
fix_vswitch()
except Exception as e:
print("fix_vswitch() error: {}".format(e))
sys.argv.remove('-f')
sys.exit(0)
usejson = False
if '-j' in sys.argv:
usejson = True
@@ -448,8 +657,24 @@ if __name__ == '__main__':
phmac = sys.argv.index('-p')
sys.argv.pop(phmac)
phmac = sys.argv.pop(phmac)
with open(phmac, 'r') as hmacin:
phmac = hmacin.read()
except ValueError:
phmac = None
try:
identfile = sys.argv.index('-i')
sys.argv.pop(identfile)
identfile = sys.argv.pop(identfile)
with open(identfile) as idin:
data = idin.read()
identinfo = json.loads(data)
nodename = identinfo.get('nodename', None)
hmackey = identinfo.get('apitoken', None)
confuuid = identinfo.get('confluent_uuid', None)
except ValueError:
hmackey = None
nodename = None
confuuid = None
try:
checkonly = False
idxit = sys.argv.index('-c')
@@ -461,20 +686,21 @@ if __name__ == '__main__':
data = open(sys.argv[-1]).read()
if outbin:
with open(outbin, 'ab+') as outf:
reader = HTTPSClient(usejson=usejson, errout=errout).grab_url(
reader = HTTPSClient(usejson=usejson, errout=errout, hmackey=hmackey, nodename=nodename, confuuid=confuuid).grab_url(
sys.argv[1], data, returnrsp=True)
chunk = reader.read(16384)
while chunk:
outf.write(chunk)
chunk = reader.read(16384)
sys.exit(0)
client = HTTPSClient(usejson, errout=errout, phmac=phmac, checkonly=checkonly)
mclient = HTTPSClient(usejson, errout=errout, phmac=phmac, checkonly=checkonly, hmackey=hmackey, nodename=nodename, confuuid=confuuid)
if waitfor:
status = 201
while status != waitfor:
status, rsp = client.grab_url_with_status(sys.argv[1], data)
status, rsp = mclient.grab_url_with_status(sys.argv[1], data)
sys.stdout.write(rsp.decode())
elif checkonly:
sys.stdout.write(client.check_connections())
sys.stdout.write(mclient.check_connections())
else:
sys.stdout.write(client.grab_url(sys.argv[1], data).decode())
sys.stdout.write(mclient.grab_url(sys.argv[1], data).decode())

View File

@@ -0,0 +1,119 @@
#!/usr/bin/python3
# This script evaluates whether firmware redirection is likely. It uses three cues:
# - Does the system offer up SPCR? This would indicate that the firmware is doing serial output.
# Otherwise, there's no indication that the firmware cares about serial console.
# - Is the system EFI? BIOS implementations may not intercept text draw calls after POST exit,
# thus even when BIOS tells us serial port is in use, it may not be doing anything when
# grub would be running
# - Is the serial port connected? In the event that firmware indicates serial port, but
# serial port is not reporting DCD, then it doesn't look like a comfortable enough scenario
import fcntl
import os
import os.path
import struct
import subprocess
import termios
# Legacy COM port I/O addresses -> Linux serial device nodes.
addrtoname = {
    0x3f8: '/dev/ttyS0',
    0x2f8: '/dev/ttyS1',
    0x3e8: '/dev/ttyS2',
    0x2e8: '/dev/ttyS3',
}
# SPCR baud-rate byte (table offset 58) -> baud; 0 means firmware left the
# port speed "as-is"/unspecified.
speedmap = {
    0: None,
    3: 9600,
    4: 19200,
    6: 57600,
    7: 115200,
}
# Baud -> termios speed constant, used to program the tty to match firmware.
termiobaud = {
    9600: termios.B9600,
    19200: termios.B19200,
    57600: termios.B57600,
    115200: termios.B115200,
}
def deserialize_grub_rh():
    """Drop 'serial ' from GRUB_TERMINAL on Red Hat family systems.

    Used when firmware is already redirecting console output to serial,
    making grub's own serial driving redundant.  Skipped when the user
    explicitly configured a serial console on the kernel command line --
    in that case they own the grub behavior as well.  Regenerates
    grub.cfg after rewriting /etc/default/grub.
    """
    if 'console=ttyS' in open('/proc/cmdline').read():
        return None # User manually indicated serial config
        # they own the grub behavior too for now
    grublines = []
    with open('/etc/default/grub') as grubin:
        grublines = grubin.read().split('\n')
    with open('/etc/default/grub', 'w') as grubout:
        for grubline in grublines:
            if grubline.startswith('GRUB_TERMINAL'):
                grubline = grubline.replace('serial ', '')
            # NOTE(review): if the file ends with a newline, split('\n')
            # yields a trailing '' and this rewrite appends one extra blank
            # line per run -- confirm acceptable.
            grubout.write(grubline + '\n')
    subprocess.check_call(['grub2-mkconfig', '-o', '/boot/grub2/grub.cfg'])
def fixup_ubuntu_grub_serial():
    """Adjust Ubuntu grub defaults to be usable over a serial console.

    Rewrites /etc/default/grub to show the menu instead of hiding it,
    raise the timeout from 0 to 2 seconds, and uncomment
    GRUB_TERMINAL=console, then regenerates grub.cfg via update-grub.
    """
    # Ubuntu aggressively tries to graphics up
    # grub. We will counter that for serial
    # They also aggressively hide UI and
    # block ability to interject. We will
    # compromise and lean on nodeboot <node> setup
    # as a means to give someone reasonable shot at
    # the short timeout
    with open('/etc/default/grub') as grubin:
        grublines = grubin.read().split('\n')
    with open('/etc/default/grub', 'w') as grubout:
        for grubline in grublines:
            if grubline.startswith('GRUB_TIMEOUT_STYLE=hidden'):
                grubline = 'GRUB_TIMEOUT_STYLE=menu'
            elif grubline.startswith('GRUB_TIMEOUT=0'):
                grubline = 'GRUB_TIMEOUT=2'
            elif grubline.startswith('#GRUB_TERMINAL=console'):
                # Uncomment so grub uses the plain console terminal.
                grubline = grubline.replace('#', '')
            grubout.write(grubline + '\n')
    subprocess.check_call(['update-grub'])
def get_serial_config():
    """Inspect the ACPI SPCR table to gauge firmware serial redirection.

    Returns None unless all of the following hold:
      - the system booted via EFI,
      - an SPCR table is present, revision 2, describing a full 16550 UART
        (interface type 0) in system I/O space at a known legacy COM address.

    Otherwise returns a dict with:
      tty:       the /dev/ttyS* node matching the SPCR I/O address
      speed:     baud from the SPCR baud byte (None = firmware left it as-is)
      connected: whether DCD (carrier) is asserted after programming the tty
                 to the firmware's baud rate.

    NOTE(review): 'connected' is only set when speed is non-None; callers
    subscripting it on a speed-None result will raise KeyError -- confirm
    intended.
    """
    if not os.path.exists('/sys/firmware/efi'):
        return None
    if not os.path.exists('/sys/firmware/acpi/tables/SPCR'):
        return None
    spcr = open("/sys/firmware/acpi/tables/SPCR", "rb")
    spcr = bytearray(spcr.read())
    # Offset 8 is the ACPI header revision; offset 36 is the SPCR interface
    # type (0 == full 16550); offset 40 is the base-address GAS address
    # space ID (1 == system I/O).  Anything else is not a legacy COM port.
    if spcr[8] != 2 or spcr[36] != 0 or spcr[40] != 1:
        return None
    # 64-bit address field of the Generic Address Structure (offset 44).
    address = struct.unpack('<Q', spcr[44:52])[0]
    tty = None
    try:
        tty = addrtoname[address]
    except KeyError:
        return None
    retval = { 'tty': tty }
    try:
        # Offset 58 is the SPCR baud-rate byte.
        retval['speed'] = speedmap[spcr[58]]
    except KeyError:
        return None
    if retval['speed']:
        # Match the tty baud to firmware's, then sample carrier detect (DCD)
        # to see whether something is actually attached to the port.
        ttyf = os.open(tty, os.O_RDWR | os.O_NOCTTY)
        currattr = termios.tcgetattr(ttyf)
        currattr[4:6] = [0, termiobaud[retval['speed']]]
        termios.tcsetattr(ttyf, termios.TCSANOW, currattr)
        retval['connected'] = bool(struct.unpack('<I', fcntl.ioctl(
            ttyf, termios.TIOCMGET, '\x00\x00\x00\x00'))[0] & termios.TIOCM_CAR)
        os.close(ttyf)
    return retval
def main():
    """Reconcile grub serial settings with firmware console redirection.

    Only acts when the firmware indicates serial redirection AND the port
    reports carrier; then applies the distro-appropriate grub fixup.
    """
    serial_cfg = get_serial_config()
    if serial_cfg is None or not serial_cfg['connected']:
        # No firmware redirection, or nothing attached: leave grub alone.
        return
    if os.path.exists('/etc/redhat-release'):
        # Red Hat family
        deserialize_grub_rh()
        return
    if os.path.exists('/etc/os-release'):
        with open('/etc/os-release') as osr:
            osrelease = osr.read()
        if 'Ubuntu' in osrelease:
            fixup_ubuntu_grub_serial()
if __name__ == '__main__':
main()

View File

@@ -164,6 +164,9 @@ class NetplanManager(object):
if curraddr not in currips:
needcfgwrite = True
currips.append(curraddr)
if stgs.get('mtu', None):
devdict = self.getcfgarrpath([devname])
devdict['mtu'] = int(stgs['mtu'])
gws = []
gws.append(stgs.get('ipv4_gateway', None))
gws.append(stgs.get('ipv6_gateway', None))
@@ -301,6 +304,12 @@ class WickedManager(object):
class NetworkManager(object):
bondtypes = {
'lacp': '802.3ad',
'loadbalance': 'balance-alb',
'roundrobin': 'balance-rr',
'activebackup': 'active-backup',
}
def __init__(self, devtypes, deploycfg):
self.deploycfg = deploycfg
self.connections = {}
@@ -361,7 +370,7 @@ class NetworkManager(object):
args.append(bondcfg[parm])
subprocess.check_call(['nmcli', 'c', 'm', team] + args)
def apply_configuration(self, cfg):
def apply_configuration(self, cfg, lastchance=False):
cmdargs = {}
cmdargs['connection.autoconnect'] = 'yes'
stgs = cfg['settings']
@@ -375,6 +384,8 @@ class NetworkManager(object):
cmdargs['ipv4.gateway'] = stgs['ipv4_gateway']
if stgs.get('ipv6_gateway', None):
cmdargs['ipv6.gateway'] = stgs['ipv6_gateway']
if stgs.get('mtu', None):
cmdargs['802-3-ethernet.mtu'] = stgs['mtu']
dnsips = self.deploycfg.get('nameservers', [])
if not dnsips:
dnsips = []
@@ -400,10 +411,10 @@ class NetworkManager(object):
cargs = []
for arg in cmdargs:
cargs.append(arg)
cargs.append(cmdargs[arg])
if stgs['team_mode'] == 'lacp':
stgs['team_mode'] = '802.3ad'
subprocess.check_call(['nmcli', 'c', 'add', 'type', 'bond', 'con-name', cname, 'connection.interface-name', cname, 'bond.options', 'mode={}'.format(stgs['team_mode'])] + cargs)
cargs.append('{}'.format(cmdargs[arg]))
if stgs['team_mode'] in self.bondtypes:
stgs['team_mode'] = self.bondtypes[stgs['team_mode']]
subprocess.check_call(['nmcli', 'c', 'add', 'type', 'bond', 'con-name', cname, 'connection.interface-name', cname, 'bond.options', 'miimon=100,mode={}'.format(stgs['team_mode'])] + cargs)
for iface in cfg['interfaces']:
self.add_team_member(cname, iface)
subprocess.check_call(['nmcli', 'c', 'u', cname])
@@ -412,8 +423,9 @@ class NetworkManager(object):
iname = list(cfg['interfaces'])[0]
ctype = self.devtypes.get(iname, None)
if not ctype:
sys.stderr.write("Warning, no device found for interface_name ({0}), skipping setup\n".format(iname))
return
if lastchance:
sys.stderr.write("Warning, no device found for interface_name ({0}), skipping setup\n".format(iname))
return 1
if stgs.get('vlan_id', None):
vlan = stgs['vlan_id']
if ctype == 'infiniband':
@@ -434,7 +446,7 @@ class NetworkManager(object):
cargs = []
for arg in cmdargs:
cargs.append(arg)
cargs.append(cmdargs[arg])
cargs.append('{}'.format(cmdargs[arg]))
if u:
subprocess.check_call(['nmcli', 'c', 'm', u, 'connection.interface-name', iname] + cargs)
subprocess.check_call(['nmcli', 'c', 'u', u])
@@ -448,6 +460,9 @@ class NetworkManager(object):
if __name__ == '__main__':
checktarg = None
if '-c' in sys.argv:
checktarg = sys.argv[sys.argv.index('-c') + 1]
havefirewall = subprocess.call(['systemctl', 'status', 'firewalld'])
havefirewall = havefirewall == 0
if havefirewall:
@@ -473,15 +488,16 @@ if __name__ == '__main__':
continue
myname = s.getsockname()
s.close()
curridx = None
if len(myname) == 4:
curridx = myname[-1]
else:
myname = myname[0]
myname = socket.inet_pton(socket.AF_INET, myname)
for addr in myaddrs:
if myname == addr[1].tobytes():
if myname == addr[1]:
curridx = addr[-1]
if curridx in doneidxs:
if curridx is not None and curridx in doneidxs:
continue
for tries in (1, 2, 3):
try:
@@ -533,13 +549,49 @@ if __name__ == '__main__':
rm_tmp_llas(tmpllas)
if os.path.exists('/usr/sbin/netplan'):
nm = NetplanManager(dc)
if os.path.exists('/usr/bin/nmcli'):
elif os.path.exists('/usr/bin/nmcli'):
nm = NetworkManager(devtypes, dc)
elif os.path.exists('/usr/sbin/wicked'):
nm = WickedManager()
retrynics = []
for netn in netname_to_interfaces:
nm.apply_configuration(netname_to_interfaces[netn])
redo = nm.apply_configuration(netname_to_interfaces[netn])
if redo == 1:
retrynics.append(netn)
if retrynics:
idxmap, devtypes = map_idx_to_name()
if os.path.exists('/usr/sbin/netplan'):
nm = NetplanManager(dc)
if os.path.exists('/usr/bin/nmcli'):
nm = NetworkManager(devtypes, dc)
elif os.path.exists('/usr/sbin/wicked'):
nm = WickedManager()
for netn in retrynics:
nm.apply_configuration(netname_to_interfaces[netn], lastchance=True)
if havefirewall:
subprocess.check_call(['systemctl', 'start', 'firewalld'])
await_tentative()
maxwait = 10
while maxwait:
try:
tclient = apiclient.HTTPSClient(checkonly=True)
tclient.check_connections()
break
except Exception:
maxwait -= 1
time.sleep(1)
maxwait = 10
if checktarg:
while maxwait:
try:
addrinf = socket.getaddrinfo(checktarg, 443)[0]
psock = socket.socket(addrinf[0], socket.SOCK_STREAM)
psock.settimeout(10)
psock.connect(addrinf[4])
psock.close()
break
except Exception:
maxwait -= 1
time.sleep(1)

View File

@@ -1,49 +0,0 @@
is_suse=false
is_rhel=false
if test -f /boot/efi/EFI/redhat/grub.cfg; then
grubcfg="/boot/efi/EFI/redhat/grub.cfg"
grub2-mkconfig -o $grubcfg
is_rhel=true
elif test -f /boot/efi/EFI/sle_hpc/grub.cfg; then
grubcfg="/boot/efi/EFI/sle_hpc/grub.cfg"
grub2-mkconfig -o $grubcfg
is_suse=true
else
echo "Expected File missing: Check if os sle_hpc or redhat"
exit
fi
# working on SUSE
if $is_suse; then
start=false
num_line=0
lines_to_edit=()
while read line; do
((num_line++))
if [[ $line == *"grub_platform"* ]]; then
start=true
fi
if $start; then
if [[ $line != "#"* ]];then
lines_to_edit+=($num_line)
fi
fi
if [[ ${#line} -eq 2 && $line == *"fi" ]]; then
if $start; then
start=false
fi
fi
done < grub_cnf.cfg
for line_num in "${lines_to_edit[@]}"; do
line_num+="s"
sed -i "${line_num},^,#," $grubcfg
done
sed -i 's,^terminal,#terminal,' $grubcfg
fi
# Working on Redhat
if $is_rhel; then
sed -i 's,^serial,#serial, ; s,^terminal,#terminal,' $grubcfg
fi

View File

@@ -7,7 +7,7 @@ for pubkey in /etc/ssh/ssh_host*key.pub; do
continue
fi
certfile=${pubkey/.pub/-cert.pub}
rm $certfile
echo -n > $certfile
confluentpython $confapiclient /confluent-api/self/sshcert $pubkey -o $certfile
done
if [ -d /etc/ssh/sshd_config.d/ -a ! -e /etc/ssh/sshd_config.d/90-confluent.conf ]; then
@@ -17,6 +17,13 @@ if [ -d /etc/ssh/sshd_config.d/ -a ! -e /etc/ssh/sshd_config.d/90-confluent.conf
echo HostbasedAuthentication yes >> /etc/ssh/sshd_config.d/90-confluent.conf
echo HostbasedUsesNameFromPacketOnly yes >> /etc/ssh/sshd_config.d/90-confluent.conf
echo IgnoreRhosts no >> /etc/ssh/sshd_config.d/90-confluent.conf
elif [ ! -d /etc/ssh/sshd_config.d/ ] && ! grep HostCertificate /etc/ssh/sshd_config > /dev/null; then
for cert in /etc/ssh/ssh*-cert.pub; do
echo HostCertificate $cert >> /etc/ssh/sshd_config
done
echo HostbasedAuthentication yes >> /etc/ssh/sshd_config
echo HostbasedUsesNameFromPacketOnly yes >> /etc/ssh/sshd_config
echo IgnoreRhosts no >> /etc/ssh/sshd_config
fi
TMPDIR=$(mktemp -d)
@@ -25,13 +32,20 @@ confluentpython $confapiclient /confluent-public/site/initramfs.tgz -o initramfs
tar xf initramfs.tgz
for ca in ssh/*.ca; do
LINE=$(cat $ca)
cp -af /etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts.new
grep -v "$LINE" /etc/ssh/ssh_known_hosts > /etc/ssh/ssh_known_hosts.new
if [ -z "$LINE" ]; then continue; fi
if [ -f /etc/ssh/ssh_known_hosts ]; then
cp -af /etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts.new
grep -v "$LINE" /etc/ssh/ssh_known_hosts > /etc/ssh/ssh_known_hosts.new
fi
echo '@cert-authority *' $LINE >> /etc/ssh/ssh_known_hosts.new
mv /etc/ssh/ssh_known_hosts.new /etc/ssh/ssh_known_hosts
done
mkdir -p /root/.ssh/
chmod 700 /root/.ssh/
touch /root/.ssh/authorized_keys
for pubkey in ssh/*.*pubkey; do
LINE=$(cat $pubkey)
if [ -z "$LINE" ]; then continue; fi
cp -af /root/.ssh/authorized_keys /root/.ssh/authorized_keys.new
grep -v "$LINE" /root/.ssh/authorized_keys > /root/.ssh/authorized_keys.new
echo "$LINE" >> /root/.ssh/authorized_keys.new
@@ -41,3 +55,4 @@ confluentpython $confapiclient /confluent-api/self/nodelist | sed -e 's/^- //' >
cat /etc/ssh/shosts.equiv > /root/.shosts
cd -
rm -rf $TMPDIR
systemctl try-restart sshd

View File

@@ -26,11 +26,18 @@ mkdir -p opt/confluent/bin
mkdir -p stateless-bin
cp -a el8bin/* .
ln -s el8 el9
for os in rhvh4 el7 genesis el8 suse15 ubuntu20.04 ubuntu22.04 ubuntu24.04 coreos el9; do
ln -s el8 el10
cp -a debian debian13
mkdir -p debian13/initramfs/usr
mv debian13/initramfs/lib debian13/initramfs/usr/
mv el10/initramfs/usr el10/initramfs/var
for os in rhvh4 el7 genesis el8 suse15 debian debian13 ubuntu20.04 ubuntu22.04 ubuntu24.04 coreos el9 el10; do
mkdir ${os}out
cd ${os}out
if [ -d ../${os}bin ]; then
cp -a ../${os}bin/opt .
elif [ $os = el10 ]; then
cp -a ../el9bin/opt .
else
cp -a ../opt .
fi
@@ -40,11 +47,13 @@ for os in rhvh4 el7 genesis el8 suse15 ubuntu20.04 ubuntu22.04 ubuntu24.04 coreo
mv ../addons.cpio .
cd ..
done
for os in el7 el8 suse15 el9 ubuntu20.04; do
for os in el7 el8 suse15 el9 el10 ubuntu20.04; do
mkdir ${os}disklessout
cd ${os}disklessout
if [ -d ../${os}bin ]; then
cp -a ../${os}bin/opt .
elif [ $os = el10 ]; then
cp -a ../el9bin/opt .
else
cp -a ../opt .
fi
@@ -76,7 +85,10 @@ cp -a esxi7 esxi8
%install
mkdir -p %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/
#cp LICENSE %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/
for os in rhvh4 el7 el8 el9 genesis suse15 ubuntu20.04 ubuntu22.04 ubuntu24.04 esxi6 esxi7 esxi8 coreos; do
mkdir -p %{buildroot}/opt/confluent/lib/osdeploy/common
cp common/initramfs/opt/confluent/bin/apiclient %{buildroot}/opt/confluent/lib/osdeploy/common
for os in rhvh4 el7 el8 el9 el10 debian debian13 genesis suse15 ubuntu20.04 ubuntu22.04 ubuntu24.04 esxi6 esxi7 esxi8 coreos; do
mkdir -p %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs/aarch64/
cp ${os}out/addons.* %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs/aarch64/
if [ -d ${os}disklessout ]; then

View File

@@ -29,8 +29,11 @@ This contains support utilities for enabling deployment of x86_64 architecture s
#cd ..
ln -s el8 el9
cp -a el8 el10
cp -a debian debian13
mkdir -p debian13/initramfs/usr
mv debian13/initramfs/lib debian13/initramfs/usr/
mv el10/initramfs/usr el10/initramfs/var
for os in rhvh4 el7 genesis el8 suse15 ubuntu18.04 ubuntu20.04 ubuntu22.04 ubuntu24.04 coreos el9 el10; do
for os in rhvh4 el7 genesis el8 suse15 debian debian13 ubuntu18.04 ubuntu20.04 ubuntu22.04 ubuntu24.04 coreos el9 el10; do
mkdir ${os}out
cd ${os}out
if [ -d ../${os}bin ]; then
@@ -46,11 +49,13 @@ for os in rhvh4 el7 genesis el8 suse15 ubuntu18.04 ubuntu20.04 ubuntu22.04 ubunt
mv ../addons.cpio .
cd ..
done
for os in el7 el8 suse15 el9 ubuntu20.04 ubuntu22.04 ubuntu24.04; do
for os in el7 el8 suse15 el9 el10 ubuntu20.04 ubuntu22.04 ubuntu24.04; do
mkdir ${os}disklessout
cd ${os}disklessout
if [ -d ../${os}bin ]; then
cp -a ../${os}bin/opt .
elif [ $os = el10 ]; then
cp -a ../el9bin/opt .
else
cp -a ../el8bin/opt .
fi
@@ -77,12 +82,14 @@ cd ..
cp -a esxi7out esxi6out
cp -a esxi7 esxi6
cp -a esxi7out esxi8out
cp -a esxi7out esxi9out
cp -a esxi7 esxi8
cp -a esxi7 esxi9
%install
mkdir -p %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/
cp LICENSE %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/
for os in rhvh4 el7 el8 el9 el10 genesis suse15 ubuntu20.04 ubuntu18.04 ubuntu22.04 ubuntu24.04 esxi6 esxi7 esxi8 coreos; do
for os in rhvh4 el7 el8 el9 el10 genesis suse15 ubuntu20.04 debian debian13 ubuntu18.04 ubuntu22.04 ubuntu24.04 esxi6 esxi7 esxi8 esxi9 coreos; do
mkdir -p %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs
mkdir -p %{buildroot}/opt/confluent/lib/osdeploy/$os/profiles
cp ${os}out/addons.* %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs

View File

@@ -14,65 +14,127 @@ setdebopt() {
echo d-i $1 $3 $2 >> /preseed.cfg
}
dhuuid=$(reverse_uuid $(cat /sys/devices/virtual/dmi/id/product_uuid))
dhcpid=$(mktemp)
mkdir -p /etc/confluent
cp /tls/* /etc/ssl/certs/
for nic in $(ip link | grep mtu|grep -v LOOPBACK|cut -d: -f 2|sed -e 's/ //'); do
ip link set $nic up
for i in /sys/class/net/*; do
ip link set $(basename $i) up
done
for nic in $(ip link | grep mtu|grep -v LOOPBACK|grep LOWER_UP|cut -d: -f 2|sed -e 's/ //'); do
if udhcpc -i $nic -p $dhcpid -t 2 -T 2 -n -x 93:0007 -x 97:00$dhuuid -q; then
/opt/confluent/bin/copernicus > /etc/confluent/confluent.info
if grep ^MANAGER:.*\\. /etc/confluent/confluent.info ; then
break
fi
TRIES=5
while [ ! -e /dev/disk ] && [ $TRIES -gt 0 ]; do
sleep 2
TRIES=$((TRIES - 1))
done
for i in /sys/class/net/*; do
ip link set $(basename $i) down
udevadm info $i | grep ID_NET_DRIVER=cdc_ether > /dev/null && continue
ip link set $(basename $i) up
done
cp -a /tls/* /etc/ssl/certs/
mkdir -p /etc/confluent
if [ -e /dev/disk/by-label/CNFLNT_IDNT ]; then
tmnt=$(mktemp -d)
tcfg=$(mktemp)
mount /dev/disk/by-label/CNFLNT_IDNT $tmnt
cd $tmnt
deploysrvs=$(sed -n '/^deploy_servers:/,/^[^-]/p' cnflnt.yml |grep ^-|sed -e 's/^- //'|grep -v :)
nodename=$(grep ^nodename: cnflnt.yml|cut -f 2 -d ' ')
echo NODENAME: $nodename > /etc/confluent/confluent.info
sed -n '/^net_cfgs:/,/^[^- ]/{/^[^- ]/!p}' cnflnt.yml |sed -n '/^-/,/^-/{/^-/!p}'| sed -e 's/^[- ]*//'> $tcfg
autoconfigmethod=$(grep ^ipv4_method: $tcfg)
autoconfigmethod=${autoconfigmethod#ipv4_method: }
if [ "$autoconfigmethod" = "static" ]; then
setdebopt netcfg/disable_dhcp true boolean
v4addr=$(grep ^ipv4_address: $tcfg|cut -d: -f 2|sed -e 's/ //')
v4gw=$(grep ^ipv4_gateway: $tcfg|cut -d: -f 2| sed -e 's/ //')
if [ "$v4gw" = "null" ]; then
v4gw=""
fi
v4nm=$(grep ^ipv4_netmask: $tcfg|cut -d: -f 2|sed -e 's/ //')
setdebopt netcfg/get_netmask $v4nm string
setdebopt netcfg/get_ipaddress ${v4addr%/*} string
setdebopt netcfg/confirm_static true boolean
if [ ! -z "$v4gw" ]; then
setdebopt netcfg/get_gateway $v4gw string
fi
NIC=""
while [ -z "$NIC" ]; do
for NICGUESS in $(ip link|grep LOWER_UP|grep -v LOOPBACK|cut -d ' ' -f 2 | sed -e 's/:$//'); do
ip addr add dev $NICGUESS $v4addr
if [ ! -z "$v4gw" ]; then
ip route add default via $v4gw
fi
for dsrv in $deploysrvs; do
if wget https://$dsrv/confluent-public/ --tries=1 --timeout=1 -O /dev/null > /dev/null 2>&1; then
deploysrvs=$dsrv
NIC=$NICGUESS
setdebopt netcfg/choose_interface $NIC select
break
fi
done
if [ -z "$NIC" ]; then
ip -4 a flush dev $NICGUESS
else
break
fi
done
done
#TODO: nameservers
elif [ "$v4cfgmeth" = "dhcp" ]; then
setdebopt netcfg/disable_dhcp false boolean
setdebopt netcfg/confirm_static false boolean
for NICGUESS in $(ip link|grep LOWER_UP|grep -v LOOPBACK|cut -d ' ' -f 2 | sed -e 's/:$//'); do
udhcpc $NICGUESS
done
for dsrv in $deploysrvs; do
if wget https://$dsrv/confluent-public/ --tries=1 --timeout=1 -O /dev/null > /dev/null 2>&1; then
deploysrvs=$dsrv
fi
done
fi
ip -4 flush dev $nic
done
mgr=$(grep ^MANAGER:.*\\. /etc/confluent/confluent.info|head -n 1|cut -d: -f 2|sed -e 's/ //')
nodename=$(grep ^NODENAME: /etc/confluent/confluent.info|head -n 1|cut -d: -f 2|sed -e 's/ //')
/opt/confluent/bin/clortho $nodename $mgr > /etc/confluent/confluent.apikey
mgr=$deploysrvs
ln -s /opt/confluent/bin/clortho /opt/confluent/bin/genpasshmac
hmackeyfile=/tmp/cnflnthmackeytmp
passfile=/tmp/cnflnttmppassfile
passcrypt=/tmp/cnflntcryptfile
hmacfile=/tmp/cnflnthmacfile
echo -n $(grep ^apitoken: cnflnt.yml|cut -d ' ' -f 2) > $hmackeyfile
/opt/confluent/bin/genpasshmac $passfile $passcrypt $hmacfile $hmackeyfile
wget --header="CONFLUENT_NODENAME: $nodename" --header="CONFLUENT_CRYPTHMAC: $(cat $hmacfile)" --post-file=$passcrypt https://$mgr/confluent-api/self/registerapikey -O - --quiet
cp $passfile /etc/confluent/confluent.apikey
nic=$NIC
else
dhuuid=$(reverse_uuid $(cat /sys/devices/virtual/dmi/id/product_uuid))
dhcpid=$(mktemp)
mkdir -p /etc/confluent
cp /tls/* /etc/ssl/certs/
cat /tls/*.pem >> /etc/confluent/ca.pem
for nic in $(ip link | grep mtu|grep -v LOOPBACK|cut -d: -f 2|sed -e 's/ //'); do
ip link set $nic up
done
for nic in $(ip link | grep mtu|grep -v LOOPBACK|grep LOWER_UP|cut -d: -f 2|sed -e 's/ //'); do
if udhcpc -i $nic -p $dhcpid -t 2 -T 2 -n -x 93:0007 -x 97:00$dhuuid -q; then
/opt/confluent/bin/copernicus > /etc/confluent/confluent.info
if grep ^MANAGER:.*\\. /etc/confluent/confluent.info ; then
break
fi
fi
ip -4 flush dev $nic
done
mgr=$(grep ^MANAGER:.*\\. /etc/confluent/confluent.info|head -n 1|cut -d: -f 2|sed -e 's/ //')
nodename=$(grep ^NODENAME: /etc/confluent/confluent.info|head -n 1|cut -d: -f 2|sed -e 's/ //')
/opt/confluent/bin/clortho $nodename $mgr > /etc/confluent/confluent.apikey
fi
apikey=$(cat /etc/confluent/confluent.apikey)
cd /etc/confluent
wget --header="CONFLUENT_NODENAME: $nodename" --header="CONFLUENT_APIKEY: $apikey" https://$mgr/confluent-api/self/deploycfg
cd -
predir=$(mktemp -d)
cd $predir
cp /etc/confluent/deploycfg /etc/confluent/confluent.deploycfg
profile=$(grep ^profile: /etc/confluent/deploycfg|cut -d ' ' -f 2)
wget https://$mgr/confluent-public/os/$profile/scripts/pre.sh
chmod u+x pre.sh
wget https://$mgr/confluent-public/os/$profile/preseed.cfg
mv preseed.cfg /
setdebopt auto-install/enable true boolean
setdebopt partman/early_command $predir/pre.sh string
cd -
ip -4 a flush dev $nic
setdebopt netcfg/choose_interface $nic select
setdebopt netcfg/get_hostname $nodename string
v4cfgmeth=$(grep ipv4_method: /etc/confluent/deploycfg |cut -d: -f 2|sed -e 's/ //')
if [ "$v4cfgmeth" = "static" ]; then
setdebopt netcfg/disable_dhcp true boolean
v4addr=$(grep ^ipv4_address: /etc/confluent/deploycfg|cut -d: -f 2|sed -e 's/ //')
v4gw=$(grep ^ipv4_gateway: /etc/confluent/deploycfg|cut -d: -f 2| sed -e 's/ //')
if [ "$v4gw" = "null" ]; then
v4gw=""
fi
v4nm=$(grep ^ipv4_netmask: /etc/confluent/deploycfg|cut -d: -f 2|sed -e 's/ //')
setdebopt netcfg/get_netmask $v4nm string
setdebopt netcfg/get_ipaddress $v4addr string
setdebopt netcfg/confirm_static true boolean
if [ ! -z "$v4gw" ]; then
setdebopt netcfg/get_gateway $v4gw string
fi
namesrvs=$(sed -n '/^nameservers:/,/^[^-]/p' /etc/confluent/deploycfg|grep ^- | cut -d ' ' -f 2|sed -e 's/ //')
for namesrv in "$namesrvs"; do
setdebopt netcfg/get_nameservers $namesrv string
done
elif [ "$v4cfgmeth" = "dhcp" ]; then
setdebopt netcfg/disable_dhcp false boolean
setdebopt netcfg/confirm_static false boolean
fi
namesrvs=$(sed -n '/^nameservers:/,/^[^-]/p' /etc/confluent/deploycfg|grep ^- | cut -d ' ' -f 2|sed -e 's/ //')
for namesrv in "$namesrvs"; do
setdebopt netcfg/get_nameservers $namesrv string
done
rootpass=$(grep ^rootpassword: /etc/confluent/deploycfg|cut -d ' ' -f 2|sed -e 's/ //')
if [ "$rootpass" = null ] || [ -z "$rootpass" ]; then
setdebopt passwd/root-login false boolean
@@ -84,9 +146,8 @@ setdebopt time/zone $(grep ^timezone: /etc/confluent/deploycfg|cut -d ' ' -f 2|s
ntpsrvs=$(sed -n '/^ntpservers:/,/^[^-]/p' /etc/confluent/deploycfg|grep ^- | cut -d ' ' -f 2|sed -e 's/ //')
for ntpsrv in "$ntpsrvs"; do
setdebopt clock-setup/ntp true boolean
setdebopt clock-setup/ntep-server $ntpsrv string
setdebopt clock-setup/ntp-server $ntpsrv string
done
#setdebopt console-setup/layoutcode $(grep ^keymap: /etc/confluent/deploycfg|cut -d ' ' -f 2) string
setdebopt debian-installer/locale $(grep ^locale: /etc/confluent/deploycfg|cut -d ' ' -f 2) select
domainname=$(grep ^dnsdomain: /etc/confluent/deploycfg|cut -d ' ' -f 2)
if [ ! -z "$domainname" ] && [ "$domainname" != "null" ]; then
@@ -95,3 +156,54 @@ fi
# Fetch the profile's lifecycle hook scripts from the deployment server.
wget https://$mgr/confluent-public/os/$profile/scripts/pre.sh
chmod u+x pre.sh
wget https://$mgr/confluent-public/os/$profile/scripts/prechroot.sh
chmod u+x prechroot.sh
wget https://$mgr/confluent-public/os/$profile/scripts/post.sh
chmod u+x post.sh
# Append the profile's preseed answers to the installer's master preseed file.
wget https://$mgr/confluent-public/os/$profile/preseed.cfg
cat preseed.cfg >> /preseed.cfg
echo $mgr > /etc/confluent/deployer
# Run fully automated, hooking pre.sh before partitioning and prechroot.sh at
# the late_command stage; post.sh is staged aside for later use.
setdebopt auto-install/enable true boolean
setdebopt partman/early_command $predir/pre.sh string
setdebopt preseed/late_command $predir/prechroot.sh string
mv $predir/post.sh /tmp/
cd -
# Hand our network identity to the installer's netcfg component.
ip -4 a flush dev $nic
setdebopt netcfg/choose_interface $nic select
setdebopt netcfg/get_hostname $nodename string
setdebopt netcfg/hostname $nodename string
# Use the public Debian mirror over https with no proxy.
setdebopt mirror/protocol https string
setdebopt mirror/country manual string
setdebopt mirror/https/hostname deb.debian.org string
setdebopt mirror/https/directory /debian/ string
setdebopt mirror/protocol https string
setdebopt mirror/https/proxy "" string
#setdebopt apt-setup/security_host $mgr string
# When no identity volume (CNFLNT_IDNT) is present, replicate the network
# configuration from the deployment config into debconf for the installer.
if [ ! -e /dev/disk/by-label/CNFLNT_IDNT ]; then
    v4cfgmeth=$(grep ipv4_method: /etc/confluent/deploycfg |cut -d: -f 2|sed -e 's/ //')
    if [ "$v4cfgmeth" = "static" ]; then
        setdebopt netcfg/disable_dhcp true boolean
        v4addr=$(grep ^ipv4_address: /etc/confluent/deploycfg|cut -d: -f 2|sed -e 's/ //')
        v4gw=$(grep ^ipv4_gateway: /etc/confluent/deploycfg|cut -d: -f 2| sed -e 's/ //')
        # A 'null' gateway in the config means no gateway should be set.
        if [ "$v4gw" = "null" ]; then
            v4gw=""
        fi
        v4nm=$(grep ^ipv4_netmask: /etc/confluent/deploycfg|cut -d: -f 2|sed -e 's/ //')
        setdebopt netcfg/get_netmask $v4nm string
        setdebopt netcfg/get_ipaddress $v4addr string
        setdebopt netcfg/confirm_static true boolean
        if [ ! -z "$v4gw" ]; then
            setdebopt netcfg/get_gateway $v4gw string
        fi
        # Pass the full space-separated nameserver list as one debconf value.
        namesrvs=$(sed -n '/^nameservers:/,/^[^-]/p' /etc/confluent/deploycfg|grep ^- | cut -d ' ' -f 2|sed -e 's/ //')
        for namesrv in "$namesrvs"; do
            setdebopt netcfg/get_nameservers $namesrv string
        done
    # Fix: this previously tested the undefined variable $vpcfgmeth, so the
    # dhcp branch could never run; $v4cfgmeth is the value read above.
    elif [ "$v4cfgmeth" = "dhcp" ]; then
        setdebopt netcfg/disable_dhcp false boolean
        setdebopt netcfg/confirm_static false boolean
    fi
fi

View File

@@ -0,0 +1,8 @@
#!/bin/sh
# Initialize a confluent OS profile from extracted Debian install media.
#   $1 - path to the unpacked install media
#   $2 - path to the profile directory being populated
# Abort on the first failing step, mirroring the original && chain.
set -e
sed -i 's/label: debian/label: Debian/' $2/profile.yaml
ln -s $1/linux $2/boot/kernel
ln -s $1/initrd.gz $2/boot/initramfs/distribution
mkdir -p $2/boot/efi/boot
rm $2/distribution
mcopy -i $1/boot/grub/efi.img ::/efi/boot/* $2/boot/efi/boot

View File

@@ -0,0 +1,27 @@
# Confluent preseed answers for the Debian/Ubuntu installer; this fragment is
# appended to the installer's /preseed.cfg by the deployment hook.
# Load the ssh server udeb so the running installer is reachable over ssh.
d-i anna/choose_modules string openssh-server-udeb
d-i partman-auto/method string regular
# Remove pre-existing LVM/software-RAID metadata without prompting.
d-i partman-lvm/device_remove_lvm boolean true
d-i partman-md/device_remove_md boolean true
# The partition recipe file is generated at install time (see pre.sh hook).
d-i partman-auto/expert_recipe_file string /tmp/partitionfile
# Suppress all partitioning confirmations for unattended install.
d-i partman/confirm_write_new_label boolean true
d-i partman/choose_partition select finish
d-i partman/confirm boolean true
d-i partman/confirm_nooverwrite boolean true
# No interactive user creation; confluent manages credentials.
d-i passwd/make-user boolean false
d-i clock-setup/utc boolean true
d-i apt-setup/multiverse boolean false
d-i apt-setup/universe boolean false
d-i apt-setup/backports boolean false
d-i apt-setup/updates boolean false
d-i grub-installer/only_debian boolean true
tasksel tasksel/first multiselect standard
d-i pkgsel/include string openssh-server curl
d-i pkgsel/update-policy select none
d-i pkgsel/updatedb boolean false
d-i finish-install/reboot_in_progress note
popularity-contest popularity-contest/participate boolean false
# NOTE(review): partman-auto/method is declared twice ('regular' above and
# 'lvm' here); the last occurrence wins in debconf -- confirm guided LVM is
# the intended default and drop the other line.
d-i partman-auto/method string lvm
d-i partman-auto/choose_recipe select atomic
d-i partman-lvm/confirm boolean true
d-i partman-lvm/confirm_nooverwrite boolean true
d-i partman-auto-lvm/guided_size string max

View File

@@ -0,0 +1,3 @@
# Default profile metadata; %%...%% placeholders are substituted when the
# profile is created from install media.
label: %%DISTRO%% %%VERSION%% %%ARCH%% (Default Profile)
kernelargs: quiet osprofile=%%PROFILE%%
#installedargs: example # These arguments would be added to the installed system

View File

@@ -0,0 +1,570 @@
#!/usr/bin/python
import glob
import json
import os
import socket
import sys
import time
import shlex
import subprocess
try:
import yaml
except ImportError:
pass
# imp.load_source was removed in modern Python; emulate it with importlib's
# SourceFileLoader when available, falling back to imp on old interpreters.
try:
    from importlib.machinery import SourceFileLoader
    def load_source(mod, path):
        # load_module() is deprecated but matches imp.load_source semantics.
        return SourceFileLoader(mod, path).load_module()
except ImportError:
    from imp import load_source
# The confluent API helper ships as a plain script; import it as a module from
# whichever install location exists on this system.
try:
    apiclient = load_source('apiclient', '/opt/confluent/bin/apiclient')
except IOError:
    apiclient = load_source('apiclient', '/etc/confluent/apiclient')
def add_lla(iface, mac):
    """Derive an EUI-64 style IPv6 link-local address from ``mac`` and assign
    it to ``iface``.

    Returns the assigned address (with /64 suffix) on success, or None when
    ipv6 could not be enabled or the address could not be added.
    """
    octets = mac.split(':')
    # Flip the universal/local bit of the first MAC octet per EUI-64.
    flipped = int(octets[0], 16) ^ 2
    lla = 'fe80::{0:x}{1}:{2}ff:fe{3}:{4}{5}/64'.format(
        flipped, octets[1], octets[2], octets[3], octets[4], octets[5])
    try:
        # Re-enable ipv6 on the interface in case it was suppressed.
        ctlpath = '/proc/sys/net/ipv6/conf/{0}/disable_ipv6'.format(iface)
        with open(ctlpath, 'w') as setin:
            setin.write('0')
        subprocess.check_call(
            ['ip', 'addr', 'add', 'dev', iface, lla, 'scope', 'link'])
    except Exception:
        return None
    return lla
#cli = apiclient.HTTPSClient(json=True)
#c = cli.grab_url_with_status('/confluent-api/self/netcfg')
def add_missing_llas():
    #NetworkManager goes out of its way to suppress ipv6 lla, so will just add some
    # Returns {ifname: lla} for each interface an address was added to, so the
    # caller can later undo the additions via rm_tmp_llas().
    added = {}
    linkinfo = subprocess.check_output(['ip', '-br', 'l']).decode('utf8')
    ifaces = {}
    for line in linkinfo.split('\n'):
        line = line.strip().split()
        # Skip blanks, loopback, and links with no carrier.
        if not line or 'LOOPBACK' in line[-1] or 'NO-CARRIER' in line[-1]:
            continue
        if 'UP' not in line[-1]:
            # Bring the link up so it can participate.
            subprocess.call(['ip', 'link', 'set', line[0], 'up'])
        ifaces[line[0]] = line[2]  # third column of `ip -br l` is the MAC
    ips = {}
    ipinfo = subprocess.check_output(['ip', '-br', '-6', 'a']).decode('utf8')
    for line in ipinfo.split('\n'):
        line = line.strip().split(None, 2)
        if not line:
            continue
        # NOTE(review): assumes every non-empty line has three columns; an
        # interface line with no v6 addresses would raise IndexError here --
        # confirm `ip -br -6 a` only emits interfaces holding addresses.
        ips[line[0]] = line[2]
    for iface in ifaces:
        for addr in ips.get(iface, '').split():
            if addr.startswith('fe80::'):
                break
        else:
            # No link-local address present; synthesize one from the MAC.
            newlla = add_lla(iface, ifaces[iface])
            if newlla:
                added[iface] = newlla
    return added
def rm_tmp_llas(tmpllas):
    """Remove the temporary link-local addresses previously added by
    add_missing_llas(); ``tmpllas`` maps interface name -> address."""
    for iface, lla in tmpllas.items():
        subprocess.check_call(['ip', 'addr', 'del', 'dev', iface, lla])
def await_tentative():
    """Wait (up to roughly 10 seconds) for IPv6 duplicate address detection
    to settle; addresses still in DAD are flagged 'tentative' by `ip a`."""
    maxwait = 10
    while b'tentative' in subprocess.check_output(['ip', 'a']):
        if maxwait == 0:
            break
        maxwait -= 1
        time.sleep(1)
def map_idx_to_name():
    """Parse `ip l` and return ({ifindex: ifname}, {ifname: devtype}).

    Interfaces enslaved to a master (bond/team members) are omitted from the
    index map; link type 'ether' is normalized to 'ethernet'.
    """
    map = {}
    devtype = {}
    prevdev = None
    for line in subprocess.check_output(['ip', 'l']).decode('utf8').splitlines():
        if line.startswith(' ') and 'link/' in line:
            # Continuation line carrying the link-layer type of the interface
            # named on the preceding (unindented) line.
            typ = line.split()[0].split('/')[1]
            devtype[prevdev] = typ if typ != 'ether' else 'ethernet'
        if line.startswith(' '):
            continue
        idx, iface, rst = line.split(':', 2)
        prevdev = iface.strip()
        rst = rst.split()
        try:
            # Skip enslaved interfaces (they carry a 'master' attribute).
            midx = rst.index('master')
            continue
        except ValueError:
            pass
        idx = int(idx)
        iface = iface.strip()
        map[idx] = iface
    return map, devtype
def get_interface_name(iname, settings):
    """Resolve which interface name(s) a network config entry applies to.

    Returns the explicitly configured interface_names when present, the
    current interface ``iname`` when the entry targets the current nic, and
    None otherwise.
    """
    explicit = settings.get('interface_names')
    if explicit:
        return explicit
    return iname if settings.get('current_nic', False) else None
class NetplanManager(object):
    """Applies confluent network settings by merging them into netplan YAML
    (Ubuntu/netplan systems) and running 'netplan apply'."""
    def __init__(self, deploycfg):
        # Per-device settings merged from any existing /etc/netplan files.
        self.cfgbydev = {}
        self.read_connections()
        self.deploycfg = deploycfg
    def read_connections(self):
        # Load every existing netplan yaml so confluent merges with, rather
        # than clobbers, operator-provided settings.
        for plan in glob.glob('/etc/netplan/*.y*ml'):
            with open(plan) as planfile:
                planinfo = yaml.safe_load(planfile)
                if not planinfo:
                    continue
                nicinfo = planinfo.get('network', {}).get('ethernets', {})
                for devname in nicinfo:
                    if devname == 'lo':
                        continue
                    if 'gateway4' in nicinfo[devname]:
                        # normalize deprecated syntax on read in
                        gw4 = nicinfo[devname]['gateway4']
                        del nicinfo[devname]['gateway4']
                        routeinfo = nicinfo[devname].get('routes', [])
                        for ri in routeinfo:
                            if ri.get('via', None) == gw4 and ri.get('to', None) in ('default', '0.0.0.0/0', '0/0'):
                                break
                        else:
                            routeinfo.append({
                                'to': 'default',
                                'via': gw4
                            })
                        nicinfo[devname]['routes'] = routeinfo
                    self.cfgbydev[devname] = nicinfo[devname]
    def apply_configuration(self, cfg):
        # Merge cfg['settings'] (confluent netcfg attributes) into the cached
        # per-device config, write a confluent-owned netplan file for each
        # changed device, and apply once at the end.
        devnames = cfg['interfaces']
        if len(devnames) != 1:
            raise Exception('Multi-nic team/bonds not yet supported')
        stgs = cfg['settings']
        needcfgapply = False
        for devname in devnames:
            needcfgwrite = False
            # ipv6_method missing at uconn...
            if stgs.get('ipv6_method', None) == 'static':
                curraddr = stgs['ipv6_address']
                currips = self.getcfgarrpath([devname, 'addresses'])
                if curraddr not in currips:
                    needcfgwrite = True
                    currips.append(curraddr)
            if stgs.get('ipv4_method', None) == 'static':
                curraddr = stgs['ipv4_address']
                currips = self.getcfgarrpath([devname, 'addresses'])
                if curraddr not in currips:
                    needcfgwrite = True
                    currips.append(curraddr)
            if stgs.get('mtu', None):
                # NOTE(review): getcfgarrpath creates a *list* leaf for a
                # device not seen before, so devdict['mtu'] would fail for a
                # device with no prior or static-address entries; also an
                # mtu-only change does not set needcfgwrite -- confirm.
                devdict = self.getcfgarrpath([devname])
                devdict['mtu'] = int(stgs['mtu'])
            gws = []
            gws.append(stgs.get('ipv4_gateway', None))
            gws.append(stgs.get('ipv6_gateway', None))
            for gwaddr in gws:
                if gwaddr:
                    # Add a default route via this gateway if none exists yet.
                    cfgroutes = self.getcfgarrpath([devname, 'routes'])
                    for rinfo in cfgroutes:
                        if rinfo.get('via', None) == gwaddr:
                            break
                    else:
                        needcfgwrite = True
                        cfgroutes.append({'via': gwaddr, 'to': 'default'})
            dnsips = self.deploycfg.get('nameservers', [])
            dnsdomain = self.deploycfg.get('dnsdomain', '')
            if dnsips:
                currdnsips = self.getcfgarrpath([devname, 'nameservers', 'addresses'])
                for dnsip in dnsips:
                    if dnsip and dnsip not in currdnsips:
                        needcfgwrite = True
                        currdnsips.append(dnsip)
            if dnsdomain:
                currdnsdomain = self.getcfgarrpath([devname, 'nameservers', 'search'])
                if dnsdomain not in currdnsdomain:
                    needcfgwrite = True
                    currdnsdomain.append(dnsdomain)
            if needcfgwrite:
                needcfgapply = True
                newcfg = {'network': {'version': 2, 'ethernets': {devname: self.cfgbydev[devname]}}}
                # Write the generated file with restrictive permissions.
                oumask = os.umask(0o77)
                with open('/etc/netplan/{0}-confluentcfg.yaml'.format(devname), 'w') as planout:
                    planout.write(yaml.dump(newcfg))
                os.umask(oumask)
        if needcfgapply:
            subprocess.call(['netplan', 'apply'])
    def getcfgarrpath(self, devpath):
        # Walk (creating dicts as needed) the nested config path and return
        # the container at its leaf, creating an empty list when absent.
        currptr = self.cfgbydev
        for k in devpath[:-1]:
            if k not in currptr:
                currptr[k] = {}
            currptr = currptr[k]
        if devpath[-1] not in currptr:
            currptr[devpath[-1]] = []
        return currptr[devpath[-1]]
class WickedManager(object):
    """Applies confluent network settings on SUSE-style systems by writing
    /etc/sysconfig/network ifcfg-*/ifroute-* files and driving wicked."""
    def __init__(self):
        # Counter used to name newly created bond connections (bond0, ...).
        self.teamidx = 0
        self.read_connections()
    def read_connections(self):
        # Parse existing ifcfg-* files into dicts so prior settings (notably
        # TEAM_*) can be carried over when a device is rewritten.
        self.cfgbydev = {}
        for ifcfg in glob.glob('/etc/sysconfig/network/ifcfg-*'):
            devname = ifcfg.replace('/etc/sysconfig/network/ifcfg-', '')
            if devname == 'lo':
                continue
            currcfg = {}
            self.cfgbydev[devname] = currcfg
            for cfg in open(ifcfg).read().splitlines():
                cfg = cfg.split('#', 1)[0]
                try:
                    kv = ' '.join(shlex.split(cfg)).split('=', 1)
                except Exception:
                    # unparseable line, likely having something we can't handle
                    # NOTE(review): the loop keeps running after dropping the
                    # device, and kv may still hold the previous line's value
                    # here -- confirm whether a 'break'/'continue' was intended.
                    del self.cfgbydev[devname]
                if len(kv) != 2:
                    continue
                k, v = kv
                k = k.strip()
                v = v.strip()
                currcfg[k] = v
    def apply_configuration(self, cfg):
        # Build the ifcfg payload for the device(s) in cfg and bring them up
        # with wicked; cfg['settings'] uses confluent netcfg attribute names.
        stgs = cfg['settings']
        ipcfg = 'STARTMODE=auto\n'
        routecfg = ''
        bootproto4 = stgs.get('ipv4_method', 'none')
        bootproto6 = stgs.get('ipv6_method', 'none')
        if bootproto4 == 'dhcp' and bootproto6 == 'dhcp':
            ipcfg += 'BOOTPROTO=dhcp\n'
        elif bootproto4 == 'dhcp':
            ipcfg += 'BOOTPROTO=dhcp4\n'
        elif bootproto6 == 'dhcp':
            ipcfg += 'BOOTPROTO=dhcp6\n'
        else:
            ipcfg += 'BOOTPROTO=static\n'
        # NOTE(review): v4gw/v6gw are only assigned inside these address
        # blocks but are read unconditionally below; a config without the
        # corresponding address would hit a NameError -- confirm.
        if stgs.get('ipv4_address', None):
            ipcfg += 'IPADDR=' + stgs['ipv4_address'] + '\n'
            v4gw = stgs.get('ipv4_gateway', None)
        if stgs.get('ipv6_address', None):
            ipcfg += 'IPADDR_V6=' + stgs['ipv6_address'] + '\n'
            v6gw = stgs.get('ipv6_gateway', None)
        cname = None
        if len(cfg['interfaces']) > 1: # creating new team
            if not stgs.get('team_mode', None):
                sys.stderr.write("Warning, multiple interfaces ({0}) without a team_mode, skipping setup\n".format(','.join(cfg['interfaces'])))
                return
            if not stgs.get('connection_name', None):
                stgs['connection_name'] = 'bond{0}'.format(self.teamidx)
                self.teamidx += 1
            cname = stgs['connection_name']
            with open('/etc/sysconfig/network/ifcfg-{0}'.format(cname), 'w') as teamout:
                teamout.write(ipcfg)
                if stgs['team_mode'] == 'lacp':
                    # wicked/bonding uses the 802.3ad mode name for LACP.
                    stgs['team_mode'] = '802.3ad'
                teamout.write("BONDING_MODULE_OPTS='mode={0} miimon=100'\nBONDING_MASTER=yes\n".format(stgs['team_mode']))
                idx = 1
                for iface in cfg['interfaces']:
                    # Tear down and remove standalone config for each member
                    # before enslaving it to the bond.
                    subprocess.call(['wicked', 'ifdown', iface])
                    try:
                        os.remove('/etc/sysconfig/network/ifcfg-{0}'.format(iface))
                        os.remove('/etc/sysconfig/network/ifroute-{0}'.format(iface))
                    except OSError:
                        pass
                    teamout.write('BONDING_SLAVE{0}={1}\n'.format(idx, iface))
                    idx += 1
        else:
            cname = list(cfg['interfaces'])[0]
            # Preserve any pre-existing TEAM_* settings for this device.
            priorcfg = self.cfgbydev.get(cname, {})
            for cf in priorcfg:
                if cf.startswith('TEAM_'):
                    ipcfg += '{0}={1}\n'.format(cf, priorcfg[cf])
            with open('/etc/sysconfig/network/ifcfg-{0}'.format(cname), 'w') as iout:
                iout.write(ipcfg)
        if v4gw:
            routecfg += 'default {0} - {1}\n'.format(v4gw, cname)
        if v6gw:
            routecfg += 'default {0} - {1}\n'.format(v6gw, cname)
        if routecfg:
            with open('/etc/sysconfig/network/ifroute-{0}'.format(cname), 'w') as routeout:
                routeout.write(routecfg)
        subprocess.call(['wicked', 'ifup', cname])
class NetworkManager(object):
    """Applies confluent network settings via nmcli on NetworkManager-based
    systems, including bond creation and VLAN/InfiniBand pkey children."""
    # confluent team_mode names -> NetworkManager bond mode names
    bondtypes = {
        'lacp': '802.3ad',
        'loadbalance': 'balance-alb',
        'roundrobin': 'balance-rr',
        'activebackup': 'active-backup',
    }
    def __init__(self, devtypes, deploycfg):
        self.deploycfg = deploycfg
        self.connections = {}
        self.uuidbyname = {}
        self.uuidbydev = {}
        self.connectiondetail = {}
        self.read_connections()
        # Counter used to name newly created team connections (team0, ...).
        self.teamidx = 0
        self.devtypes = devtypes
    def read_connections(self):
        # Snapshot existing NM connections (terse listing) and the full
        # property dump for each, keyed by connection uuid.
        self.connections = {}
        self.uuidbyname = {}
        self.uuidbydev = {}
        self.connectiondetail = {}
        ci = subprocess.check_output(['nmcli', '-t', 'c']).decode('utf8')
        for inf in ci.splitlines():
            n, u, t, dev = inf.split(':')
            if n == 'NAME':
                continue
            if dev == '--':
                dev = None
            self.uuidbyname[n] = u
            if dev:
                self.uuidbydev[dev] = u
            self.connections[u] = {'name': n, 'uuid': u, 'type': t, 'dev': dev}
            deats = {}
            for deat in subprocess.check_output(['nmcli', 'c', 's', u]).decode('utf8').splitlines():
                k, v = deat.split(':', 1)
                v = v.strip()
                # Skip unset ('--') and default-valued properties.
                if v == '--':
                    continue
                if '(default)' in v:
                    continue
                deats[k] = v
            self.connectiondetail[u] = deats
    def add_team_member(self, team, member):
        # Enslave 'member' to bond connection 'team', carrying over selected
        # dns/dhcp settings from any existing standalone connection.
        bondcfg = {}
        if member in self.uuidbydev:
            myuuid = self.uuidbydev[member]
            deats = self.connectiondetail[myuuid]
            currteam = deats.get('connection.master', None)
            if currteam == team:
                # Already enslaved to the requested bond; nothing to do.
                return
            for stg in ('ipv4.dhcp-hostname', 'ipv4.dns', 'ipv6.dns', 'ipv6.dhcp-hostname'):
                if deats.get(stg, None):
                    bondcfg[stg] = deats[stg]
        if member in self.uuidbyname:
            subprocess.check_call(['nmcli', 'c', 'del', self.uuidbyname[member]])
        devtype = self.devtypes.get(member, 'bond-slave')
        subprocess.check_call(['nmcli', 'c', 'add', 'type', devtype, 'master', team, 'con-name', member, 'connection.interface-name', member])
        if bondcfg:
            # Re-apply the preserved settings onto the bond connection.
            args = []
            for parm in bondcfg:
                args.append(parm)
                args.append(bondcfg[parm])
            subprocess.check_call(['nmcli', 'c', 'm', team] + args)
    def apply_configuration(self, cfg, lastchance=False):
        # Translate cfg['settings'] into nmcli property arguments and create
        # or modify the relevant connection(s). Returns 1 when the target
        # device is not yet known (caller may retry after a device rescan).
        cmdargs = {}
        cmdargs['connection.autoconnect'] = 'yes'
        stgs = cfg['settings']
        cmdargs['ipv6.method'] = stgs.get('ipv6_method', 'link-local')
        if stgs.get('ipv6_address', None):
            cmdargs['ipv6.addresses'] = stgs['ipv6_address']
        cmdargs['ipv4.method'] = stgs.get('ipv4_method', 'disabled')
        if stgs.get('ipv4_address', None):
            cmdargs['ipv4.addresses'] = stgs['ipv4_address']
        if stgs.get('ipv4_gateway', None):
            cmdargs['ipv4.gateway'] = stgs['ipv4_gateway']
        if stgs.get('ipv6_gateway', None):
            cmdargs['ipv6.gateway'] = stgs['ipv6_gateway']
        if stgs.get('mtu', None):
            cmdargs['802-3-ethernet.mtu'] = stgs['mtu']
        # Split deployment nameservers into v4/v6 lists for nmcli.
        dnsips = self.deploycfg.get('nameservers', [])
        if not dnsips:
            dnsips = []
        dns4 = []
        dns6 = []
        for dnsip in dnsips:
            if '.' in dnsip:
                dns4.append(dnsip)
            elif ':' in dnsip:
                dns6.append(dnsip)
        if dns4:
            cmdargs['ipv4.dns'] = ','.join(dns4)
        if dns6:
            cmdargs['ipv6.dns'] = ','.join(dns6)
        if len(cfg['interfaces']) > 1: # team time.. should be..
            if not cfg['settings'].get('team_mode', None):
                sys.stderr.write("Warning, multiple interfaces ({0}) without a team_mode, skipping setup\n".format(','.join(cfg['interfaces'])))
                return
            if not cfg['settings'].get('connection_name', None):
                cfg['settings']['connection_name'] = 'team{0}'.format(self.teamidx)
                self.teamidx += 1
            cname = cfg['settings']['connection_name']
            cargs = []
            for arg in cmdargs:
                cargs.append(arg)
                cargs.append('{}'.format(cmdargs[arg]))
            if stgs['team_mode'] in self.bondtypes:
                stgs['team_mode'] = self.bondtypes[stgs['team_mode']]
            subprocess.check_call(['nmcli', 'c', 'add', 'type', 'bond', 'con-name', cname, 'connection.interface-name', cname, 'bond.options', 'miimon=100,mode={}'.format(stgs['team_mode'])] + cargs)
            for iface in cfg['interfaces']:
                self.add_team_member(cname, iface)
            subprocess.check_call(['nmcli', 'c', 'u', cname])
        else:
            cname = stgs.get('connection_name', None)
            iname = list(cfg['interfaces'])[0]
            ctype = self.devtypes.get(iname, None)
            if not ctype:
                if lastchance:
                    sys.stderr.write("Warning, no device found for interface_name ({0}), skipping setup\n".format(iname))
                # Signal the caller to retry after refreshing device info.
                return 1
            if stgs.get('vlan_id', None):
                # Create a VLAN (ethernet) or pkey (infiniband) child device.
                vlan = stgs['vlan_id']
                if ctype == 'infiniband':
                    vlan = '0x{0}'.format(vlan) if not vlan.startswith('0x') else vlan
                    cmdargs['infiniband.parent'] = iname
                    cmdargs['infiniband.p-key'] = vlan
                    iname = '{0}.{1}'.format(iname, vlan[2:])
                elif ctype == 'ethernet':
                    ctype = 'vlan'
                    cmdargs['vlan.parent'] = iname
                    cmdargs['vlan.id'] = vlan
                    iname = '{0}.{1}'.format(iname, vlan)
                else:
                    sys.stderr.write("Warning, unknown interface_name ({0}) device type ({1}) for VLAN/PKEY, skipping setup\n".format(iname, ctype))
                    return
            cname = iname if not cname else cname
            u = self.uuidbyname.get(cname, None)
            cargs = []
            for arg in cmdargs:
                cargs.append(arg)
                cargs.append('{}'.format(cmdargs[arg]))
            if u:
                # Modify and re-activate the existing connection.
                subprocess.check_call(['nmcli', 'c', 'm', u, 'connection.interface-name', iname] + cargs)
                subprocess.check_call(['nmcli', 'c', 'u', u])
            else:
                subprocess.check_call(['nmcli', 'c', 'add', 'type', ctype, 'con-name', cname, 'connection.interface-name', iname] + cargs)
                self.read_connections()
                u = self.uuidbyname.get(cname, None)
                if u:
                    subprocess.check_call(['nmcli', 'c', 'u', u])
if __name__ == '__main__':
    # firewalld can interfere with probing and reconfiguration; stop it for
    # the duration and restart it at the end if it was running.
    havefirewall = subprocess.call(['systemctl', 'status', 'firewalld'])
    havefirewall = havefirewall == 0
    if havefirewall:
        subprocess.check_call(['systemctl', 'stop', 'firewalld'])
    # Temporarily ensure link-local v6 addresses exist for server probing.
    tmpllas = add_missing_llas()
    await_tentative()
    idxmap, devtypes = map_idx_to_name()
    netname_to_interfaces = {}
    myaddrs = apiclient.get_my_addresses()
    srvs, _ = apiclient.scan_confluents()
    doneidxs = set([])
    dc = None
    if not srvs: # the multicast scan failed, fallback to deploycfg cfg file
        with open('/etc/confluent/confluent.deploycfg', 'r') as dci:
            for cfgline in dci.read().split('\n'):
                if cfgline.startswith('deploy_server:'):
                    srvs = [cfgline.split()[1]]
                    break
    for srv in srvs:
        # Determine which local interface reaches this deployment server by
        # opening a throwaway connection and inspecting the socket name.
        try:
            s = socket.create_connection((srv, 443))
        except socket.error:
            continue
        myname = s.getsockname()
        s.close()
        if len(myname) == 4:
            # A 4-tuple sockname is AF_INET6; its last field is the scope id
            # (interface index).
            curridx = myname[-1]
        else:
            myname = myname[0]
            myname = socket.inet_pton(socket.AF_INET, myname)
            # NOTE(review): if no entry in myaddrs matches, curridx remains
            # unbound (or stale from a prior loop iteration) -- confirm.
            for addr in myaddrs:
                if myname == addr[1].tobytes():
                    curridx = addr[-1]
        if curridx in doneidxs:
            continue
        # Fetch this interface's network configuration, with a few retries.
        for tries in (1, 2, 3):
            try:
                status, nc = apiclient.HTTPSClient(usejson=True, host=srv).grab_url_with_status('/confluent-api/self/netcfg')
                break
            except Exception:
                if tries == 3:
                    raise
                time.sleep(1)
                continue
        nc = json.loads(nc)
        # Deployment config (nameservers, dns domain) is fetched only once.
        if not dc:
            for tries in (1, 2, 3):
                try:
                    status, dc = apiclient.HTTPSClient(usejson=True, host=srv).grab_url_with_status('/confluent-api/self/deploycfg2')
                    break
                except Exception:
                    if tries == 3:
                        raise
                    time.sleep(1)
                    continue
            dc = json.loads(dc)
        # Accumulate interface membership for the default net and extranets.
        iname = get_interface_name(idxmap[curridx], nc.get('default', {}))
        if iname:
            for iname in iname.split(','):
                if 'default' in netname_to_interfaces:
                    netname_to_interfaces['default']['interfaces'].add(iname)
                else:
                    netname_to_interfaces['default'] = {'interfaces': set([iname]), 'settings': nc['default']}
        for netname in nc.get('extranets', {}):
            uname = '_' + netname
            iname = get_interface_name(idxmap[curridx], nc['extranets'][netname])
            if iname:
                for iname in iname.split(','):
                    if uname in netname_to_interfaces:
                        netname_to_interfaces[uname]['interfaces'].add(iname)
                    else:
                        netname_to_interfaces[uname] = {'interfaces': set([iname]), 'settings': nc['extranets'][netname]}
        doneidxs.add(curridx)
    # An interface claimed by a named extranet is removed from 'default'.
    if 'default' in netname_to_interfaces:
        for netn in netname_to_interfaces:
            if netn == 'default':
                continue
            netname_to_interfaces['default']['interfaces'] -= netname_to_interfaces[netn]['interfaces']
        if not netname_to_interfaces['default']['interfaces']:
            del netname_to_interfaces['default']
    # Make sure VLAN/PKEY connections are created last
    netname_to_interfaces = dict(sorted(netname_to_interfaces.items(), key=lambda item: 'vlan_id' in item[1]['settings']))
    rm_tmp_llas(tmpllas)
    # Backend selection; note the second test is 'if', not 'elif', so nmcli
    # overrides netplan when both are installed -- appears intentional, but
    # confirm (NOTE(review)).
    if os.path.exists('/usr/sbin/netplan'):
        nm = NetplanManager(dc)
    if os.path.exists('/usr/bin/nmcli'):
        nm = NetworkManager(devtypes, dc)
    elif os.path.exists('/usr/sbin/wicked'):
        nm = WickedManager()
    retrynics = []
    for netn in netname_to_interfaces:
        redo = nm.apply_configuration(netname_to_interfaces[netn])
        if redo == 1:
            retrynics.append(netn)
    if retrynics:
        # Device names may have changed during the first pass; refresh the
        # device map, rebuild the manager, and retry once as a last chance.
        idxmap, devtypes = map_idx_to_name()
        if os.path.exists('/usr/sbin/netplan'):
            nm = NetplanManager(dc)
        if os.path.exists('/usr/bin/nmcli'):
            nm = NetworkManager(devtypes, dc)
        elif os.path.exists('/usr/sbin/wicked'):
            nm = WickedManager()
        for netn in retrynics:
            nm.apply_configuration(netname_to_interfaces[netn], lastchance=True)
    if havefirewall:
        subprocess.check_call(['systemctl', 'start', 'firewalld'])
    await_tentative()

View File

@@ -0,0 +1,11 @@
# Runs confluent's one-shot first-boot personalization on a deployed node.
[Unit]
Description=First Boot Process
# Full network connectivity is needed: firstboot.sh reports status back to
# the deployment server.
Requires=network-online.target
After=network-online.target
[Service]
ExecStart=/opt/confluent/bin/firstboot.sh
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Confluent first-boot hook: runs once on first boot of the installed system,
# applies remaining personalization, then reports completion to the server.
echo "Confluent first boot is running"
HOME=$(getent passwd $(whoami)|cut -d: -f 6)
export HOME
#cp -a /etc/confluent/ssh/* /etc/ssh/
#systemctl restart sshd
# Apply the root password hash from the deployment config, when one is set.
rootpw=$(grep ^rootpassword: /etc/confluent/confluent.deploycfg |awk '{print $2}')
if [ ! -z "$rootpw" -a "$rootpw" != "null" ]; then
    echo root:$rootpw | chpasswd -e
fi
nodename=$(grep ^NODENAME: /etc/confluent/confluent.info | awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg |awk '{print $2}')
# Wait for the deployment server to become reachable before phoning home.
while ! ping -c 1 $confluent_mgr >& /dev/null; do
    sleep 1
done
source /etc/confluent/functions
# Run profile-provided firstboot.d scripts, then server-driven configuration.
run_remote_parts firstboot.d
run_remote_config firstboot.d
# One-shot unit: disable ourselves and report deployment completion.
systemctl disable firstboot
curl -f -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" -X POST -d "status: complete" https://$confluent_mgr/confluent-api/self/updatestatus

View File

@@ -0,0 +1,216 @@
#!/bin/bash
function test_mgr() {
    # Probe candidate deployment server $1 over https; return 0 when the
    # confluent API endpoint responds. Bare IPv6 literals are bracketed for
    # use in the URL.
    whost=$1
    case "$whost" in
        *\[*) ;;
        *:*) whost="[$whost]" ;;
    esac
    curl -gs https://${whost}/confluent-api/ > /dev/null && return 0
    return 1
}
function initconfluentscriptstmp() {
    # Lazily create a private scratch directory for downloaded scripts and
    # record it in $confluentscripttmpdir; a no-op when one is already set.
    [ -n "$confluentscripttmpdir" ] && return
    mkdir -p /opt/confluent/tmpexec
    confluentscripttmpdir=$(mktemp -d /opt/confluent/tmpexec/confluentscripts.XXXXXXXXX)
}
function confluentpython() {
    # Invoke the best available Python interpreter: the EL platform-python
    # first, then python3, then the legacy interpreters. Silently does
    # nothing when no interpreter is installed.
    # Fix: pass "$@" so each argument is preserved exactly; the previous
    # unquoted $* re-split and glob-expanded arguments containing whitespace
    # or wildcards.
    if [ -x /usr/libexec/platform-python ]; then
        /usr/libexec/platform-python "$@"
    elif [ -x /usr/bin/python3 ]; then
        /usr/bin/python3 "$@"
    elif [ -x /usr/bin/python ]; then
        /usr/bin/python "$@"
    elif [ -x /usr/bin/python2 ]; then
        /usr/bin/python2 "$@"
    fi
}
function set_confluent_vars() {
    # Populate and export nodename, confluent_mgr and confluent_profile from
    # /etc/confluent data, probing candidate deployment servers until one
    # answers (v4 deploy server, then v6, then any advertised manager).
    if [ -z "$nodename" ]; then
        nodename=$(grep ^NODENAME: /etc/confluent/confluent.info | awk '{print $2}')
    fi
    # A '%' in the manager address (presumably a link-local zone suffix --
    # confirm) is unusable in URLs; discard it and rediscover below.
    if [[ "$confluent_mgr" == *"%"* ]]; then
        confluent_mgr=""
    fi
    if [ -z "$confluent_mgr" ]; then
        confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
        if ! test_mgr $confluent_mgr; then
            confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
            # Bracket bare IPv6 literals for use in URLs.
            if [[ "$confluent_mgr" = *":"* ]]; then
                confluent_mgr="[$confluent_mgr]"
            fi
        fi
        if ! test_mgr $confluent_mgr; then
            # Fall back to advertised managers; entries ending '|1' are
            # preferred over '|0'.
            BESTMGRS=$(grep ^EXTMGRINFO: /etc/confluent/confluent.info | grep '|1$' | sed -e 's/EXTMGRINFO: //' -e 's/|.*//')
            OKMGRS=$(grep ^EXTMGRINFO: /etc/confluent/confluent.info | grep '|0$' | sed -e 's/EXTMGRINFO: //' -e 's/|.*//')
            for confluent_mgr in $BESTMGRS $OKMGRS; do
                if [[ $confluent_mgr == *":"* ]]; then
                    confluent_mgr="[$confluent_mgr]"
                fi
                if test_mgr $confluent_mgr; then
                    break
                fi
            done
        fi
    fi
    if [ -z "$confluent_profile" ]; then
        confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
    fi
    export confluent_profile confluent_mgr nodename
}
fetch_remote() {
    # Download scripts/$1 from the deployment server into ./$1, creating any
    # leading directories; uses the confluent CA certificate when installed.
    curlargs=""
    if [ -f /etc/confluent/ca.pem ]; then
        curlargs=" --cacert /etc/confluent/ca.pem"
    fi
    set_confluent_vars
    mkdir -p $(dirname $1)
    # Bracket bare IPv6 literals for use in the URL.
    whost=$confluent_mgr
    if [[ "$whost" == *:* ]] && [[ "$whost" != *[* ]] ; then
        whost="[$whost]"
    fi
    curl -gf -sS $curlargs https://$whost/confluent-public/os/$confluent_profile/scripts/$1 > $1
    if [ $? != 0 ]; then echo $1 failed to download; return 1; fi
}
source_remote_parts() {
    # Fetch the server-provided list of scripts under directory $1 and source
    # each one in the current shell, sharing one scratch directory for all.
    unset confluentscripttmpdir
    initconfluentscriptstmp
    apiclient=/opt/confluent/bin/apiclient
    if [ -f /etc/confluent/apiclient ]; then
        apiclient=/etc/confluent/apiclient
    fi
    scriptlist=$(confluentpython $apiclient /confluent-api/self/scriptlist/$1|sed -e 's/^- //')
    for script in $scriptlist; do
        source_remote $1/$script
    done
    rm -rf $confluentscripttmpdir
    unset confluentscripttmpdir
}
run_remote_parts() {
    # Fetch the server-provided list of scripts under directory $1 and run
    # each one in sequence, sharing one scratch directory for all of them.
    unset confluentscripttmpdir
    initconfluentscriptstmp
    apiclient=/opt/confluent/bin/apiclient
    if [ -f /etc/confluent/apiclient ]; then
        apiclient=/etc/confluent/apiclient
    fi
    scriptlist=$(confluentpython $apiclient /confluent-api/self/scriptlist/$1|sed -e 's/^- //')
    for script in $scriptlist; do
        run_remote $1/$script
    done
    rm -rf $confluentscripttmpdir
    unset confluentscripttmpdir
}
source_remote() {
    # Download scripts/$1 from the deployment server and source it in the
    # caller's shell; returns the exit status of the sourced script.
    # Fixes (mirroring run_remote): capture retcode after sourcing (it was
    # previously never set, so a stale value was returned); only remove the
    # scratch directory when this call created it -- the old unconditional
    # rm -rf destroyed the shared directory source_remote_parts was still
    # using for subsequent scripts.
    set_confluent_vars
    unsettmpdir=0
    echo
    echo '---------------------------------------------------------------------------'
    echo Sourcing $1 from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
    if [ -z "$confluentscripttmpdir" ]; then
        unsettmpdir=1
    fi
    initconfluentscriptstmp
    echo Sourcing from $confluentscripttmpdir
    cd $confluentscripttmpdir
    fetch_remote $1
    if [ $? != 0 ]; then echo $1 failed to download; return 1; fi
    chmod +x $1
    cmd=$1
    shift
    source ./$cmd
    retcode=$?
    cd - > /dev/null
    if [ "$unsettmpdir" = 1 ]; then
        rm -rf $confluentscripttmpdir
        unset confluentscripttmpdir
        unsettmpdir=0
    fi
    return $retcode
}
run_remote() {
    # Download scripts/$1 from the deployment server into a scratch directory
    # and execute it with the remaining arguments; returns the script's exit
    # code.
    requestedcmd="'$*'"
    # Track whether this call created the scratch dir so it is only torn down
    # here when owned (the *_parts runners share one dir across scripts).
    unsettmpdir=0
    set_confluent_vars
    echo
    echo '---------------------------------------------------------------------------'
    echo Running $requestedcmd from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
    if [ -z "$confluentscripttmpdir" ]; then
        unsettmpdir=1
    fi
    initconfluentscriptstmp
    echo Executing in $confluentscripttmpdir
    cd $confluentscripttmpdir
    fetch_remote $1
    if [ $? != 0 ]; then echo $requestedcmd failed to download; return 1; fi
    chmod +x $1
    cmd=$1
    # Label the downloaded script executable for SELinux-enforcing systems.
    if [ -x /usr/bin/chcon ]; then
        chcon system_u:object_r:bin_t:s0 $cmd
    fi
    shift
    ./$cmd $*
    retcode=$?
    if [ $retcode -ne 0 ]; then
        echo "$requestedcmd exited with code $retcode"
    fi
    cd - > /dev/null
    if [ "$unsettmpdir" = 1 ]; then
        rm -rf $confluentscripttmpdir
        unset confluentscripttmpdir
        unsettmpdir=0
    fi
    return $retcode
}
run_remote_python() {
    # Download scripts/$1 and run it with the preferred python interpreter,
    # passing through remaining arguments; returns the script's exit code.
    echo
    set_confluent_vars
    if [ -f /etc/confluent/ca.pem ]; then
        curlargs=" --cacert /etc/confluent/ca.pem"
    fi
    echo '---------------------------------------------------------------------------'
    echo Running python script "'$*'" from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
    # Always use a fresh scratch directory for python scripts.
    unset confluentscripttmpdir
    initconfluentscriptstmp
    echo Executing in $confluentscripttmpdir
    cd $confluentscripttmpdir
    mkdir -p $(dirname $1)
    # Bracket bare IPv6 literals for use in the URL.
    whost=$confluent_mgr
    if [[ "$whost" == *:* ]] && [[ "$whost" != *[* ]] ; then
        whost="[$whost]"
    fi
    curl -gf -sS $curlargs https://$whost/confluent-public/os/$confluent_profile/scripts/$1 > $1
    if [ $? != 0 ]; then echo "'$*'" failed to download; return 1; fi
    confluentpython $*
    retcode=$?
    echo "'$*' exited with code $retcode"
    cd - > /dev/null
    rm -rf $confluentscripttmpdir
    unset confluentscripttmpdir
    return $retcode
}
run_remote_config() {
    # Ask the deployment server to run its server-side configuration for
    # phase "$*", then wait until the server reports completion.
    echo
    set_confluent_vars
    apiclient=/opt/confluent/bin/apiclient
    if [ -f /etc/confluent/apiclient ]; then
        apiclient=/etc/confluent/apiclient
    fi
    echo '---------------------------------------------------------------------------'
    echo Requesting to run remote configuration for "'$*'" from $confluent_mgr under profile $confluent_profile
    confluentpython $apiclient /confluent-api/self/remoteconfig/"$*" -d {}
    # Poll until the server answers 204 (no pending configuration remains).
    confluentpython $apiclient /confluent-api/self/remoteconfig/status -w 204
    echo
    echo 'Completed remote configuration'
    echo '---------------------------------------------------------------------------'
    return
}
# If this file is executed rather than sourced, dispatch to the named
# function: the (return ...) probe only succeeds in a sourced context, so
# direct execution falls through to run "$1" with the remaining arguments.
(return 0 2>/dev/null) || $1 "${@:2}"

View File

@@ -0,0 +1,67 @@
#!/bin/bash
# Confluent post-install hook: sets up host-based ssh trust, stages the
# firstboot unit, adjusts bootloader settings, and reports 'staged' status.
mkdir -p /run/sshd
mkdir -p /root/.ssh
# Trust the deployment-provided public keys and certificate authorities.
cat /tmp/ssh/*pubkey >> /root/.ssh/authorized_keys
cat /tmp/ssh/*.ca | sed -e s/^/'@cert-authority * '/ >> /etc/ssh/ssh_known_hosts
chmod 700 /etc/confluent
chmod go-rwx /etc/confluent/*
# Enable host-based authentication, preferring a drop-in when supported.
sshconf=/etc/ssh/ssh_config
if [ -d /etc/ssh/ssh_config.d/ ]; then
    sshconf=/etc/ssh/ssh_config.d/01-confluent.conf
fi
echo 'Host *' >> $sshconf
echo ' HostbasedAuthentication yes' >> $sshconf
echo ' EnableSSHKeysign yes' >> $sshconf
echo ' HostbasedKeyTypes *ed25519*' >> $sshconf
/usr/sbin/sshd
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg | awk '{print $2}')
mkdir -p /opt/confluent/bin
# Stage the firstboot script and its systemd unit for the first real boot.
python3 /opt/confluent/bin/apiclient /confluent-public/os/$confluent_profile/scripts/firstboot.sh > /opt/confluent/bin/firstboot.sh
chmod +x /opt/confluent/bin/firstboot.sh
python3 /opt/confluent/bin/apiclient /confluent-public/os/$confluent_profile/scripts/firstboot.service > /etc/systemd/system/firstboot.service
systemctl enable firstboot
python3 /opt/confluent/bin/apiclient /confluent-public/os/$confluent_profile/scripts/functions > /etc/confluent/functions
source /etc/confluent/functions
# Publish the cluster node list for host-based ssh equivalence.
python3 /opt/confluent/bin/apiclient /confluent-api/self/nodelist | sed -e s/'^- //' > /tmp/allnodes
cp /tmp/allnodes /root/.shosts
cp /tmp/allnodes /etc/ssh/shosts.equiv
# Configure systemd-timesyncd with any deployment-provided NTP servers.
if grep ^ntpservers: /etc/confluent/confluent.deploycfg > /dev/null; then
    ntps=$(sed -n '/^ntpservers:/,/^[^-]/p' /etc/confluent/confluent.deploycfg|sed 1d|sed '$d' | sed -e 's/^- //' | paste -sd ' ')
    sed -i "s/#NTP=/NTP=$ntps/" /etc/systemd/timesyncd.conf
fi
# Propagate the auto-detected serial console into the grub command line.
textcons=$(grep ^textconsole: /etc/confluent/confluent.deploycfg |awk '{print $2}')
updategrub=0
if [ "$textcons" = "true" ] && ! grep console= /proc/cmdline > /dev/null; then
    cons=""
    if [ -f /tmp/autocons.info ]; then
        cons=$(cat /tmp/autocons.info)
    fi
    if [ ! -z "$cons" ]; then
        sed -i 's/GRUB_CMDLINE_LINUX="\([^"]*\)"/GRUB_CMDLINE_LINUX="\1 console='${cons#/dev/}'"/' /etc/default/grub
        updategrub=1
    fi
fi
# Append any installedargs from the profile to the kernel command line.
kargs=$(python3 /opt/confluent/bin/apiclient /confluent-public/os/$confluent_profile/profile.yaml | grep ^installedargs: | sed -e 's/#.*//')
if [ ! -z "$kargs" ]; then
    sed -i 's/GRUB_CMDLINE_LINUX="\([^"]*\)"/GRUB_CMDLINE_LINUX="\1 '"${kargs}"'"/' /etc/default/grub
fi
if [ 1 = $updategrub ]; then
    update-grub
fi
# On UEFI systems, put the installed OS first in the boot order and clear
# the one-shot BootNext setting.
if [ -e /sys/firmware/efi ]; then
    bootnum=$(efibootmgr | grep ubuntu | sed -e 's/ .*//' -e 's/\*//' -e s/Boot//)
    if [ ! -z "$bootnum" ]; then
        currboot=$(efibootmgr | grep ^BootOrder: | awk '{print $2}')
        nextboot=$(echo $currboot| awk -F, '{print $1}')
        [ "$nextboot" = "$bootnum" ] || efibootmgr -o $bootnum,$currboot
        efibootmgr -D
    fi
fi
# Sync files, run profile post.d scripts and server-side post configuration.
run_remote_python syncfileclient
run_remote_parts post.d
run_remote_config post
python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: staged'

View File

@@ -1,18 +1,88 @@
anna-install openssh-server-udeb
mkdir -p ~/.ssh/
cat /ssh/*pubkey > ~/.ssh/authorized_keys
ssh-keygen -A
mgr=$(grep ^MANAGER:.*\\. /etc/confluent/confluent.info|head -n 1|cut -d: -f 2|sed -e 's/ //')
nodename=$(grep ^NODENAME: /etc/confluent/confluent.info|head -n 1|cut -d: -f 2|sed -e 's/ //')
#!/bin/sh
## Use the following option to add additional boot parameters for the
## installed system (if supported by the bootloader installer).
## Note: options passed to the installer will be added automatically.
#d-i debian-installer/add-kernel-opts string [from profile.yaml]
deploycfg=/etc/confluent/confluent.deploycfg
mgr=$(cat /etc/confluent/deployer)
# Encrypted boot is not implemented for this OS's deployment flow; if the
# deployment configuration requests it, warn on the console(s) and halt the
# install rather than proceed without encryption.
cryptboot=$(grep encryptboot: $deploycfg|sed -e 's/^encryptboot: //')
if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then
    echo "****Encrypted boot requested, but not implemented for this OS, halting install" > /dev/console
    [ -f '/tmp/autoconsdev' ] && (echo "****Encrypted boot requested, but not implemented for this OS, halting install" >> $(cat /tmp/autoconsdev))
    while :; do sleep 86400; done
fi
cat > /usr/lib/live-installer.d/confluent-certs << EOF
#!/bin/sh
cp /tls/* /target/etc/ssl/certs/
cat /tls/*.pem >> /target/etc/ssl/certs/ca-certificates.crt
EOF
chmod a+x /usr/lib/live-installer.d/confluent-certs
mkdir -p /.ssh/
cat /ssh/*pubkey > /.ssh/authorized_keys
mkdir -p /etc/ssh
nodename=$(grep ^NODENAME: /etc/confluent/confluent.info|cut -d ' ' -f 2)
apikey=$(cat /etc/confluent/confluent.apikey)
ssh-keygen -A
for pubkey in /etc/ssh/ssh_host*key.pub; do
certfile=${pubkey%.pub}-cert.pub
certfile=$(echo $pubkey | sed -e s/.pub/-cert.pub/)
keyfile=${pubkey%.pub}
wget --post-file=$pubkey --header='CONFLUENT_NODENAME: '$nodename --header="CONFLUENT_APIKEY: $apikey" https://$mgr/confluent-api/self/sshcert -O $certfile
wget --header="CONFLUENT_NODENAME: $nodename" --header="CONFLUENT_APIKEY: $apikey" --post-file=$pubkey https://$mgr/confluent-api/self/sshcert -O $certfile --quiet
echo HostKey $keyfile >> /etc/ssh/sshd_config
echo HostCertificate $certfile >> /etc/ssh/sshd_config
done
echo sshd:x:939:939::/: >> /etc/passwd
if [ -e /tmp/installdisk ]; then
instdisk=$(cat /tmp/installdisk)
else
for blockdev in $(ls /sys/class/block/); do
shortname=$(basename $blockdev)
if [ "$shortname" != "${shortname%loop*}" ]; then
continue
fi
udevadm info --query=property /dev/$shortname |grep DEVTYPE=disk > /dev/null || continue # ignore partitions
udevadm info --query=property /dev/$shortname |grep DM_NAME > /dev/null && continue # not a real disk
sz=$(cat /sys/block/$shortname/size 2> /dev/null)
[ -z "$sz" ] && continue
[ $sz -lt 1048576 ] && continue # Too small
[ -z "$firstdisk" ] && firstdisk=$shortname
if udevadm info --query=property /dev/$shortname|grep ID_MODEL=| sed -e s/' '/_/g | grep -iE '(thinksystem_m.2|m.2_nvme_2-bay_raid_kit)' > /dev/null; then
instdisk=$shortname
break
fi
if udevadm info --query=property /dev/$shortname|grep MD_CONTAINER=imsm; then
sraid=$sortname
else
drv=$(udevadm info -a /dev/sdb|grep DRIVERS==|grep -Ev '""|"sd"' | sed -e s/.*=// -e s/'"'//g)
if [ "ahci" = "$drv" -a -z "$onbdisk" ]; then
onbdisk=$shortname
elif [ "megaraid" = "$drv" -a -z "$rdisk" ]; then
rdisk=$shortname
fi
fi
done
fi
if [ -z "$instdisk" ]; then
if [ ! -z "$sraid"]; then
instdisk=$sraid
elif [ ! -z "$onbdisk" ]; then
instdisk=$onbdisk
elif [ ! -z "$rdisk" ]; then
instdisk=$rdisk
else
instdisk=$firstdisk
fi
fi
if [ ! -z "$instdisk" ]; then
debconf-set partman-auto/disk /dev/$instdisk
debconf-set grub-installer/bootdev /dev/$instdisk
fi
echo HostbasedAuthentication yes >> /etc/ssh/sshd_config
echo HostbasedUsesNameFromPacketOnly yes >> /etc/ssh/sshd_config
echo IgnoreRhosts no >> /etc/ssh/sshd_config
echo sshd:x:1:1::/run/sshd:/bin/false >> /etc/passwd
/usr/sbin/sshd
wget --header="CONFLUENT_NODENAME: $nodename" --header="CONFLUENT_APIKEY: $apikey" https://$mgr/confluent-api/self/nodelist -O /tmp/allnodes --quiet
#kill -HUP $(ps | grep -v grep | grep /usr/sbin/sshd | sed -e 's/^ *//'|cut -d ' ' -f 1)
#curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/getinstalldisk > /tmp/getinstalldisk
#python3 /tmp/getinstalldisk
#sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml

View File

@@ -0,0 +1,19 @@
#!/bin/sh
# Runs in the debian-installer environment at the end of install:
# bind-mounts the pseudo-filesystems into /target, stages confluent state,
# keys and certificates into the target, then executes the post script
# inside the target via chroot.
mount -o bind /sys /target/sys
mount -o bind /dev /target/dev
mount -o bind /dev/pts /target/dev/pts
mount -o bind /proc /target/proc
mount -o bind /run /target/run
cp -a /etc/confluent /target/etc/confluent
cp -a /opt/confluent /target/opt/confluent
mv /tmp/post.sh /target/tmp/
cp -a /ssh /tls /target/tmp
cat /tls/*.pem >> /target/etc/confluent/ca.pem
# Carry the installer's host keys and certificates into the installed system.
cp -a /etc/ssh/ssh_host_* /target/etc/ssh/
grep HostCertificate /etc/ssh/sshd_config >> /target/etc/ssh/sshd_config
# Move the installer's sshd to port 2222 so it does not conflict with the
# target system's sshd.
echo Port 2222 >> /etc/ssh/sshd_config
kill -HUP $(ps |grep -v grep|grep sshd|grep /usr|sed -e s/' root.*//')
cp /tls/* /target/etc/ssl/certs/
cat /tls/*.pem >> /target/etc/ssl/certs/ca-certificates.crt
chroot /target bash /tmp/post.sh

View File

@@ -0,0 +1,5 @@
#!/bin/bash
# Add this to firstboot.d
# Installs the Proxmox VE stack (plus its mail/iSCSI/time dependencies) on
# first boot; stdin is /dev/null so apt cannot prompt interactively.
export DEBIAN_FRONTEND=noninteractive
apt-get -y install proxmox-ve postfix open-iscsi chrony < /dev/null

View File

@@ -0,0 +1,26 @@
#!/bin/bash
# This script would run in post.d
#
# Prepares an installed Debian system for Proxmox VE: adds the
# pve-no-subscription repository, verifies the downloaded repository signing
# key against a known sha512, then installs the Proxmox kernel and removes
# the stock Debian kernel and os-prober.
export DEBIAN_FRONTEND=noninteractive
codename=$(grep ^VERSION_CODENAME /etc/os-release | cut -d= -f2)
echo "deb [arch=amd64] http://download.proxmox.com/debian/pve $codename pve-no-subscription" > /etc/apt/sources.list.d/pve-install-repo.list
wget https://enterprise.proxmox.com/debian/proxmox-release-$codename.gpg -O /etc/apt/trusted.gpg.d/proxmox-release-$codename.gpg
# sha512sum prints "<hash>  <filename>"; keep only the hash field so it can
# be compared against the expected value.
sum=$(sha512sum /etc/apt/trusted.gpg.d/proxmox-release-$codename.gpg | awk '{print $1}')
if [ "$codename" == "bookworm" ]; then
    expectedsum=7da6fe34168adc6e479327ba517796d4702fa2f8b4f0a9833f5ea6e6b48f6507a6da403a274fe201595edc86a84463d50383d07f64bdde2e3658108db7d6dc87
elif [ "$codename" == "trixie" ]; then
    expectedsum=8678f2327c49276615288d7ca11e7d296bc8a2b96946fe565a9c81e533f9b15a5dbbad210a0ad5cd46d361ff1d3c4bac55844bc296beefa4f88b86e44e69fa51
fi
# Hashes are hex strings, so use a string comparison. The previous integer
# test (-ne) always errored on non-numeric operands, so a mismatched key was
# never actually rejected. An unrecognized codename leaves expectedsum empty
# and is rejected here as well.
if [ "$sum" != "$expectedsum" ]; then
    echo "Mismatch in fingerprint!"
    rm /etc/apt/trusted.gpg.d/proxmox-release-$codename.gpg
    exit 1
fi
apt-get update && apt-get -y full-upgrade < /dev/null
apt-get -y install proxmox-default-kernel < /dev/null
apt-get -y remove linux-image-amd64 'linux-image-6.1*' < /dev/null
update-grub
apt-get -y remove os-prober < /dev/null

View File

@@ -0,0 +1,45 @@
# Refresh ssh trust material from the confluent deployment server: signed
# host certificates, certificate authorities, authorized keys and the
# cluster node list, then enable host-based authentication.
[ -f /lib/confluent/functions ] && . /lib/confluent/functions
[ -f /etc/confluent/functions ] && . /etc/confluent/functions
[ -f /opt/confluent/bin/apiclient ] && confapiclient=/opt/confluent/bin/apiclient
[ -f /etc/confluent/apiclient ] && confapiclient=/etc/confluent/apiclient
# Request a signed certificate for each host key, skipping the legacy
# protocol-1 ssh_host_key.
for pubkey in /etc/ssh/ssh_host*key.pub; do
    if [ "$pubkey" = /etc/ssh/ssh_host_key.pub ]; then
        continue
    fi
    certfile=${pubkey/.pub/-cert.pub}
    confluentpython $confapiclient /confluent-api/self/sshcert $pubkey -o $certfile
done
# Register certificates and enable host-based auth exactly once (guarded by
# the existence of 90-confluent.conf).
if [ -d /etc/ssh/sshd_config.d/ -a ! -e /etc/ssh/sshd_config.d/90-confluent.conf ]; then
    for cert in /etc/ssh/ssh*-cert.pub; do
        echo HostCertificate $cert >> /etc/ssh/sshd_config.d/90-confluent.conf
    done
    echo HostbasedAuthentication yes >> /etc/ssh/sshd_config.d/90-confluent.conf
    echo HostbasedUsesNameFromPacketOnly yes >> /etc/ssh/sshd_config.d/90-confluent.conf
    echo IgnoreRhosts no >> /etc/ssh/sshd_config.d/90-confluent.conf
fi
# Fetch the site initramfs bundle, which carries current CA entries (ssh/*.ca)
# and public keys (ssh/*.*pubkey).
TMPDIR=$(mktemp -d)
cd $TMPDIR
confluentpython $confapiclient /confluent-public/site/initramfs.tgz -o initramfs.tgz
tar xf initramfs.tgz
# Install each ssh CA into ssh_known_hosts, removing any stale copy of the
# same key first so entries are not duplicated.
for ca in ssh/*.ca; do
    LINE=$(cat $ca)
    if [ -z "$LINE" ]; then continue; fi
    cp -af /etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts.new
    grep -v "$LINE" /etc/ssh/ssh_known_hosts > /etc/ssh/ssh_known_hosts.new
    echo '@cert-authority *' $LINE >> /etc/ssh/ssh_known_hosts.new
    mv /etc/ssh/ssh_known_hosts.new /etc/ssh/ssh_known_hosts
done
# Likewise refresh root's authorized_keys with the site public keys,
# deduplicating in the same copy/filter/append manner.
for pubkey in ssh/*.*pubkey; do
    LINE=$(cat $pubkey)
    if [ -z "$LINE" ]; then continue; fi
    cp -af /root/.ssh/authorized_keys /root/.ssh/authorized_keys.new
    grep -v "$LINE" /root/.ssh/authorized_keys > /root/.ssh/authorized_keys.new
    echo "$LINE" >> /root/.ssh/authorized_keys.new
    mv /root/.ssh/authorized_keys.new /root/.ssh/authorized_keys
done
# Allow host-based auth from every node in the cluster node list.
confluentpython $confapiclient /confluent-api/self/nodelist | sed -e 's/^- //' > /etc/ssh/shosts.equiv
cat /etc/ssh/shosts.equiv > /root/.shosts
cd -
rm -rf $TMPDIR
systemctl try-restart sshd

View File

@@ -0,0 +1,327 @@
# Acquire this node's confluent API key, looping until successful.
# The key is requested from the deployment server via clortho; if a TPM is
# present, the freshly obtained key is additionally sealed into the TPM
# under a policy bound to PCR 15 so later boots can recover it without
# rearming deployment.apiarmed.
get_remote_apikey() {
    while [ -z "$confluent_apikey" ]; do
        /opt/confluent/bin/clortho $nodename $confluent_mgr > /etc/confluent/confluent.apikey
        if grep ^SEALED: /etc/confluent/confluent.apikey > /dev/null; then
            # we don't support remote sealed api keys anymore
            echo > /etc/confluent/confluent.apikey
        fi
        confluent_apikey=$(cat /etc/confluent/confluent.apikey)
        if [ -z "$confluent_apikey" ]; then
            echo "Unable to acquire node api key, set deployment.apiarmed=once on node '$nodename', retrying..."
            if [ ! -z "$autoconsdev" ]; then echo "Unable to acquire node api key, set deployment.apiarmed=once on node '$nodename', retrying..." > $autoconsdev; fi
            sleep 10
        elif [ -c /dev/tpmrm0 ]; then
            # Seal "CONFLUENT_APIKEY:<key>" into a persistent TPM object
            # gated on the current value of PCR 15.
            tmpdir=$(mktemp -d)
            cd $tmpdir
            tpm2_startauthsession --session=session.ctx
            tpm2_policypcr -Q --session=session.ctx --pcr-list="sha256:15" --policy=pcr15.sha256.policy
            tpm2_createprimary -G ecc -Q --key-context=prim.ctx
            (echo -n "CONFLUENT_APIKEY:";cat /etc/confluent/confluent.apikey) | tpm2_create -Q --policy=pcr15.sha256.policy --public=data.pub --private=data.priv -i - -C prim.ctx
            tpm2_load -Q --parent-context=prim.ctx --public=data.pub --private=data.priv --name=confluent.apikey --key-context=data.ctx
            tpm2_evictcontrol -Q -c data.ctx
            tpm2_flushcontext session.ctx
            cd - > /dev/null
            rm -rf $tmpdir
        fi
    done
}
# Tell dracut the root device is handled by this (confluent) netroot handler.
root=1
rootok=1
netroot=confluent
# Clear the screen and scrollback.
echo -ne '\033[H\033[2J\033[3J'
mkdir -p /etc/ssh
mkdir -p /var/tmp/
mkdir -p /var/empty/sshd
mkdir -p /usr/share/empty.sshd
mkdir -p /etc/confluent
# Ensure a usable root entry and an sshd privilege-separation user exist in
# this minimal initramfs environment.
sed -i '/^root:x/d' /etc/passwd
echo root:x:0:0::/:/bin/bash >> /etc/passwd
echo sshd:x:30:30:SSH User:/var/empty/sshd:/sbin/nologin >> /etc/passwd
# If no console= was given on the kernel command line, let autocons detect
# one; its output appears to be "<device path>,<settings>" — device kept in
# autoconsdev, basename in autocons.
if ! grep console= /proc/cmdline >& /dev/null; then
    autocons=$(/opt/confluent/bin/autocons)
    autoconsdev=${autocons%,*}
    autocons=${autocons##*/}
    echo "Automatic console configured for $autocons"
fi
echo "Initializing confluent diskless environment"
echo -n "udevd: "
/usr/lib/systemd/systemd-udevd --daemon
echo -n "Loading drivers..."
udevadm trigger
udevadm trigger --type=devices --action=add
udevadm settle
# Load high-speed fabric drivers that udev may not pull in automatically.
modprobe ib_ipoib
modprobe ib_umad
modprobe hfi1
modprobe mlx5_ib
echo "done"
# Run a debug sshd on port 2222 during early boot.
cat > /etc/ssh/sshd_config << EOF
Port 2222
Subsystem sftp /usr/libexec/openssh/sftp-server
PermitRootLogin yes
AuthorizedKeysFile .ssh/authorized_keys
EOF
mkdir /root/.ssh
mkdir /.ssh
# Authorize the deployment-provided public keys for root logins.
cat /ssh/*pubkey > /root/.ssh/authorized_keys 2>/dev/null
cp /root/.ssh/authorized_keys /.ssh/
# Trust the confluent CA certificates shipped in the initramfs.
cat /tls/*.pem > /etc/confluent/ca.pem
mkdir -p /etc/pki/tls/certs
cat /tls/*.pem > /etc/pki/tls/certs/ca-bundle.crt
# Attempt to recover a previously TPM-sealed confluent API key. Work in a
# private (umask 0077) scratch directory so session/policy files are not
# world readable.
TRIES=0
oldumask=$(umask)
umask 0077
tpmdir=$(mktemp -d)
cd $tpmdir
lasthdl=""
if [ -c /dev/tpmrm0 ]; then
    # Walk all persistent TPM handles, unsealing each under the PCR 15
    # policy; a payload prefixed "CONFLUENT_APIKEY:" is our key.
    for hdl in $(tpm2_getcap handles-persistent|awk '{print $2}'); do
        tpm2_startauthsession --policy-session --session=session.ctx
        tpm2_policypcr -Q --session=session.ctx --pcr-list="sha256:15" --policy=pcr15.sha256.policy
        unsealeddata=$(tpm2_unseal --auth=session:session.ctx -Q -c $hdl 2>/dev/null)
        tpm2_flushcontext session.ctx
        if [[ $unsealeddata == "CONFLUENT_APIKEY:"* ]]; then
            confluent_apikey=${unsealeddata#CONFLUENT_APIKEY:}
            echo $confluent_apikey > /etc/confluent/confluent.apikey
            # Keep only the most recently found key object; evict the
            # previously remembered handle.
            if [ -n "$lasthdl" ]; then
                tpm2_evictcontrol -c $lasthdl
            fi
            lasthdl=$hdl
        fi
    done
fi
cd - > /dev/null
rm -rf $tpmdir
# Discover the node identity and deployment manager by bringing every NIC up
# and probing with copernicus until a manager answers.
touch /etc/confluent/confluent.info
cd /sys/class/net
echo -n "Scanning for network configuration..."
# First wait (up to 30 tries) for an EXTMGRINFO entry whose third |-field is
# 1 (a manager usable for deployment).
while ! grep ^EXTMGRINFO: /etc/confluent/confluent.info | awk -F'|' '{print $3}' | grep 1 >& /dev/null && [ "$TRIES" -lt 30 ]; do
    TRIES=$((TRIES + 1))
    for i in *; do
        ip link set $i up
    done
    /opt/confluent/bin/copernicus -t > /etc/confluent/confluent.info
    echo -n .
done
TRIES=0
# Then keep probing (up to 300 tries at 0.5s) until at least NODENAME is known.
while ! grep ^NODENAME: /etc/confluent/confluent.info >& /dev/null && [ "$TRIES" -lt 300 ]; do
    sleep 0.5
    echo -n .
    /opt/confluent/bin/copernicus -t > /etc/confluent/confluent.info
    TRIES=$((TRIES + 1))
done
cd /
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
hostname $nodename
# Prefer a manager whose EXTMGRINFO line ends in 1; otherwise fall back to
# the first MANAGER entry.
confluent_mgr=$(grep '^EXTMGRINFO:.*1$' /etc/confluent/confluent.info | head -n 1 | awk -F': ' '{print $2}' | awk -F'|' '{print $1}')
if [ -z "$confluent_mgr" ]; then
    confluent_mgr=$(grep ^MANAGER: /etc/confluent/confluent.info|head -n 1 | awk '{print $2}')
fi
# A scoped (link-local IPv6) manager address carries %<ifindex>; record the
# index and resolve it to an interface name for later network configuration.
if [[ $confluent_mgr == *%* ]]; then
    echo $confluent_mgr | awk -F% '{print $2}' > /tmp/confluent.ifidx
    ifidx=$(cat /tmp/confluent.ifidx)
    ifname=$(ip link |grep ^$ifidx:|awk '{print $2}')
    ifname=${ifname%:}
fi
# Retrieve the deployment configuration from the manager, retrying until it
# succeeds. A 401 means the API key was rejected (discard it, including any
# TPM-sealed copy, and re-acquire); an SSL error means the manager's TLS
# certificate is not trusted yet by this node.
ready=0
while [ $ready = "0" ]; do
    get_remote_apikey
    # Bracket a bare IPv6 address so it is usable inside a URL.
    if [[ $confluent_mgr == *:* ]] && [[ $confluent_mgr != "["* ]]; then
        confluent_mgr="[$confluent_mgr]"
    fi
    tmperr=$(mktemp)
    curl -sSf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/deploycfg2 > /etc/confluent/confluent.deploycfg 2> $tmperr
    if grep 401 $tmperr > /dev/null; then
        confluent_apikey=""
        if [ -n "$lasthdl" ]; then
            tpm2_evictcontrol -c $lasthdl
        fi
        # Strip brackets again so get_remote_apikey sees the raw address.
        confluent_mgr=${confluent_mgr#[}
        confluent_mgr=${confluent_mgr%]}
    elif grep 'SSL' $tmperr > /dev/null; then
        confluent_mgr=${confluent_mgr#[}
        confluent_mgr=${confluent_mgr%]}
        echo 'Failure establishing TLS connection to '$confluent_mgr' (try `osdeploy initialize -t` on the deployment server)'
        if [ ! -z "$autoconsdev" ]; then echo 'Failure establishing TLS connection to '$confluent_mgr' (try `osdeploy initialize -t` on the deployment server)' > $autoconsdev; fi
        sleep 10
    else
        ready=1
    fi
    rm $tmperr
done
# If the profile requests a text console, let autocons finalize console setup.
if [ ! -z "$autocons" ] && grep "textconsole: true" /etc/confluent/confluent.deploycfg > /dev/null; then /opt/confluent/bin/autocons -c > /dev/null; fi
# Extend PCR 15 so the API key sealed earlier (policy bound to the
# pre-extend PCR 15 value) can no longer be unsealed later in this boot.
if [ -c /dev/tpmrm0 ]; then
    tpm2_pcrextend 15:sha256=2fbe96c50dde38ce9cd2764ddb79c216cfbcd3499568b1125450e60c45dd19f2
fi
umask $oldumask
mkdir -p /run/NetworkManager/system-connections
cat > /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
[connection]
EOC
echo id=${ifname} >> /run/NetworkManager/system-connections/$ifname.nmconnection
echo uuid=$(uuidgen) >> /run/NetworkManager/system-connections/$ifname.nmconnection
linktype=$(ip link show dev ${ifname}|grep link/|awk '{print $1}')
if [ "$linktype" = link/infiniband ]; then
linktype="infiniband"
else
linktype="ethernet"
fi
echo type=$linktype >> /run/NetworkManager/system-connections/$ifname.nmconnection
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
autoconnect-retries=1
EOC
echo interface-name=$ifname >> /run/NetworkManager/system-connections/$ifname.nmconnection
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
multi-connect=1
permissions=
wait-device-timeout=60000
EOC
if [ "$linktype" = infiniband ]; then
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
[infiniband]
transport-mode=datagram
EOC
fi
autoconfigmethod=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg |awk '{print $2}')
auto6configmethod=$(grep ^ipv6_method: /etc/confluent/confluent.deploycfg |awk '{print $2}')
if [ "$autoconfigmethod" = "dhcp" ]; then
echo -n "Attempting to use dhcp to bring up $ifname..."
dhcpcd $ifname
echo "Complete:"
ip addr show dev $ifname
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg| awk '{print $2}')
elif [ "$autoconfigmethod" = "static" ]; then
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg| awk '{print $2}')
v4addr=$(grep ^ipv4_address: /etc/confluent/confluent.deploycfg)
v4addr=${v4addr#ipv4_address: }
v4gw=$(grep ^ipv4_gateway: /etc/confluent/confluent.deploycfg)
v4gw=${v4gw#ipv4_gateway: }
if [ "$v4gw" = "null" ]; then
v4gw=""
fi
v4nm=$(grep ^prefix: /etc/confluent/confluent.deploycfg)
v4nm=${v4nm#prefix: }
echo "Setting up $ifname as static at $v4addr/$v4nm"
ip addr add dev $ifname $v4addr/$v4nm
if [ ! -z "$v4gw" ]; then
ip route add default via $v4gw
fi
echo '[ipv4]' >> /run/NetworkManager/system-connections/$ifname.nmconnection
echo address1=$v4addr/$v4nm >> /run/NetworkManager/system-connections/$ifname.nmconnection
if [ ! -z "$v4gw" ]; then
echo gateway=$v4gw >> /run/NetworkManager/system-connections/$ifname.nmconnection
fi
nameserversec=0
nameservers=""
while read -r entry; do
if [ $nameserversec = 1 ]; then
if [[ $entry == "-"*.* ]]; then
nameservers="$nameservers"${entry#- }";"
continue
fi
fi
nameserversec=0
if [ "${entry%:*}" = "nameservers" ]; then
nameserversec=1
continue
fi
done < /etc/confluent/confluent.deploycfg
echo dns=$nameservers >> /run/NetworkManager/system-connections/$ifname.nmconnection
dnsdomain=$(grep ^dnsdomain: /etc/confluent/confluent.deploycfg)
dnsdomain=${dnsdomain#dnsdomain: }
echo dns-search=$dnsdomain >> /run/NetworkManager/system-connections/$ifname.nmconnection
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
may-fail=false
method=manual
[ipv6]
addr-gen-mode=eui64
method=auto
EOC
elif [ "$auto6configmethod" = "static" ]; then
confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg| awk '{print $2}')
v6addr=$(grep ^ipv6_address: /etc/confluent/confluent.deploycfg)
v6addr=${v6addr#ipv6_address: }
v6gw=$(grep ^ipv6_gateway: /etc/confluent/confluent.deploycfg)
v6gw=${v6gw#ipv6_gateway: }
if [ "$v6gw" = "null" ]; then
v6gw=""
fi
v6nm=$(grep ^ipv6_prefix: /etc/confluent/confluent.deploycfg)
v6nm=${v6nm#ipv6_prefix: }
echo "Setting up $ifname as static at $v6addr/$v6nm"
ip addr add dev $ifname $v6addr/$v6nm
cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
[ipv4]
dhcp-timeout=90
dhcp-vendor-class-identifier=anaconda-Linux
method=disabled
[ipv6]
addr-gen-mode=eui64
method=manual
may-fail=false
EOC
echo address1=$v6addr/$v6nm >> /run/NetworkManager/system-connections/$ifname.nmconnection
if [ ! -z "$v6gw" ]; then
ip route add default via $v6gw
echo gateway=$v6gw >> /run/NetworkManager/system-connections/$ifname.nmconnection
fi
nameserversec=0
nameservers=""
while read -r entry; do
if [ $nameserversec = 1 ]; then
if [[ $entry == "-"*:* ]]; then
nameservers="$nameservers"${entry#- }";"
continue
fi
fi
nameserversec=0
if [ "${entry%:*}" = "nameservers" ]; then
nameserversec=1
continue
fi
done < /etc/confluent/confluent.deploycfg
echo dns=$nameservers >> /run/NetworkManager/system-connections/$ifname.nmconnection
dnsdomain=$(grep ^dnsdomain: /etc/confluent/confluent.deploycfg)
dnsdomain=${dnsdomain#dnsdomain: }
echo dns-search=$dnsdomain >> /run/NetworkManager/system-connections/$ifname.nmconnection
fi
echo '[proxy]' >> /run/NetworkManager/system-connections/$ifname.nmconnection
chmod 600 /run/NetworkManager/system-connections/*.nmconnection
confluent_websrv=$confluent_mgr
if [[ $confluent_websrv == *:* ]] && [[ $confluent_websrv != "["* ]]; then
confluent_websrv="[$confluent_websrv]"
fi
echo -n "Initializing ssh..."
ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -C '' -N ''
for pubkey in /etc/ssh/ssh_host*key.pub; do
certfile=${pubkey/.pub/-cert.pub}
privfile=${pubkey%.pub}
curl -sf -X POST -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" -d @$pubkey https://$confluent_websrv/confluent-api/self/sshcert > $certfile
if [ -s $certfile ]; then
echo HostCertificate $certfile >> /etc/ssh/sshd_config
fi
echo HostKey $privfile >> /etc/ssh/sshd_config
done
/usr/sbin/sshd
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg| awk '{print $2}')
confluent_proto=$(grep ^protocol: /etc/confluent/confluent.deploycfg| awk '{print $2}')
confluent_urls=""
for addr in $(grep ^MANAGER: /etc/confluent/confluent.info|awk '{print $2}'|sed -e s/%/%25/); do
if [[ $addr == *:* ]]; then
confluent_urls="$confluent_urls $confluent_proto://[$addr]/confluent-public/os/$confluent_profile/rootimg.sfs"
else
confluent_urls="$confluent_urls $confluent_proto://$addr/confluent-public/os/$confluent_profile/rootimg.sfs"
fi
done
mkdir -p /etc/confluent
curl -sf https://$confluent_websrv/confluent-public/os/$confluent_profile/scripts/functions > /etc/confluent/functions
. /etc/confluent/functions
source_remote imageboot.sh

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/add_local_repositories

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/firstboot.custom

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/firstboot.service

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/firstboot.sh

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/functions

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/getinstalldisk

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/image2disk.py

View File

@@ -0,0 +1,186 @@
# Mount the remote root image for diskless boot. dracut-lib supplies getarg
# for kernel command line parsing.
. /lib/dracut-lib.sh
confluent_whost=$confluent_mgr
# Bracket a bare IPv6 manager address for use inside URLs.
if [[ "$confluent_whost" == *:* ]] && [[ "$confluent_whost" != "["* ]]; then
    confluent_whost="[$confluent_mgr]"
fi
mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay /sysroot
# untethered/uncompressed modes download the whole image into a tmpfs so the
# node does not depend on the network after boot; otherwise urlmount keeps
# the image remote (tethered).
if [ "untethered" = "$(getarg confluent_imagemethod)" -o "uncompressed" = "$(getarg confluent_imagemethod)" ]; then
    mount -t tmpfs untethered /mnt/remoteimg
    curl https://$confluent_whost/confluent-public/os/$confluent_profile/rootimg.sfs -o /mnt/remoteimg/rootimg.sfs
else
    confluent_urls="$confluent_urls https://$confluent_whost/confluent-public/os/$confluent_profile/rootimg.sfs"
    /opt/confluent/bin/urlmount $confluent_urls /mnt/remoteimg
fi
/opt/confluent/bin/confluent_imginfo /mnt/remoteimg/rootimg.sfs > /tmp/rootimg.info
loopdev=$(losetup -f)
export mountsrc=$loopdev
losetup -r $loopdev /mnt/remoteimg/rootimg.sfs
# For an encrypted image, fetch the per-profile key from the deployment
# server and stack a dm-crypt mapping over the loop device. The first line
# of rootimg.key is the cipher spec, the last line the key material; the
# payload starts 4096 bytes (8 sectors) into the file, hence the length and
# offset arithmetic in 512-byte sectors.
if grep '^Format: confluent_crypted' /tmp/rootimg.info > /dev/null; then
    while ! curl -sf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $(cat /etc/confluent/confluent.apikey)" https://$confluent_whost/confluent-api/self/profileprivate/pending/rootimg.key > /tmp/rootimg.key; do
        echo "Unable to retrieve private key from $confluent_mgr (verify that confluent can access /var/lib/confluent/private/os/$confluent_profile/pending/rootimg.key)"
        sleep 1
    done
    cipher=$(head -n 1 /tmp/rootimg.key)
    key=$(tail -n 1 /tmp/rootimg.key)
    len=$(wc -c /mnt/remoteimg/rootimg.sfs | awk '{print $1}')
    len=$(((len-4096)/512))
    dmsetup create cryptimg --table "0 $len crypt $cipher $key 0 $loopdev 8"
    /opt/confluent/bin/confluent_imginfo /dev/mapper/cryptimg > /tmp/rootimg.info
    mountsrc=/dev/mapper/cryptimg
fi
# Mount the image: a plain squashfs mounts directly; a multisquash layout
# exposes each embedded squashfs via a generated dm-linear mapping, mounted
# at its corresponding subdirectory under /mnt/remote.
if grep '^Format: squashfs' /tmp/rootimg.info > /dev/null; then
    mount -o ro $mountsrc /mnt/remote
elif grep '^Format: confluent_multisquash' /tmp/rootimg.info; then
    tail -n +3 /tmp/rootimg.info | awk '{gsub("/", "_"); print "echo 0 " $4 " linear '$mountsrc' " $3 " | dmsetup create mproot" $7}' > /tmp/setupmount.sh
    . /tmp/setupmount.sh
    cat /tmp/setupmount.sh |awk '{printf "mount /dev/mapper/"$NF" "; sub("mproot", ""); gsub("_", "/"); print "/mnt/remote"$NF}' > /tmp/mountparts.sh
    . /tmp/mountparts.sh
fi
#mount -t tmpfs overlay /mnt/overlay
# Unless running fully uncompressed, back the writable layer with a zram
# device sized to half of system memory (value halved, unit appended).
if [ ! "uncompressed" = "$(getarg confluent_imagemethod)" ]; then
    modprobe zram
    memtot=$(grep ^MemTotal: /proc/meminfo|awk '{print $2}')
    memtot=$((memtot/2))$(grep ^MemTotal: /proc/meminfo | awk '{print $3'})
    echo $memtot > /sys/block/zram0/disksize
    mkfs.xfs /dev/zram0 > /dev/null
fi
TETHERED=0
if [ "untethered" = "$(getarg confluent_imagemethod)" -o "uncompressed" = "$(getarg confluent_imagemethod)" ]; then
if [ "untethered" = "$(getarg confluent_imagemethod)" ]; then
mount -o discard /dev/zram0 /sysroot
else
mount -t tmpfs disklessroot /sysroot
fi
echo -en "Decrypting and extracting root filesystem: 0%\r"
srcsz=$(du -sk /mnt/remote | awk '{print $1}')
while [ -f /mnt/remoteimg/rootimg.sfs ]; do
dstsz=$(du -sk /sysroot | awk '{print $1}')
pct=$((dstsz * 100 / srcsz))
if [ $pct -gt 99 ]; then
pct=99
fi
echo -en "Decrypting and extracting root filesystem: $pct%\r"
sleep 0.25
done &
cp -ax /mnt/remote/* /sysroot/
umount /mnt/remote
if [ -e /dev/mapper/cryptimg ]; then
dmsetup remove cryptimg
fi
losetup -d $loopdev
rm /mnt/remoteimg/rootimg.sfs
umount /mnt/remoteimg
wait
echo -e "Decrypting and extracting root filesystem: 100%"
else
TETHERED=1
mount -o discard /dev/zram0 /mnt/overlay
if [ ! -f /tmp/mountparts.sh ]; then
mkdir -p /mnt/overlay/upper /mnt/overlay/work
mount -t overlay -o upperdir=/mnt/overlay/upper,workdir=/mnt/overlay/work,lowerdir=/mnt/remote disklessroot /sysroot
else
for srcmount in $(cat /tmp/mountparts.sh | awk '{print $3}'); do
mkdir -p /mnt/overlay${srcmount}/upper /mnt/overlay${srcmount}/work
mount -t overlay -o upperdir=/mnt/overlay${srcmount}/upper,workdir=/mnt/overlay${srcmount}/work,lowerdir=${srcmount} disklesspart /sysroot${srcmount#/mnt/remote}
done
fi
fi
mkdir -p /sysroot/etc/ssh
mkdir -p /sysroot/etc/confluent
mkdir -p /sysroot/root/.ssh
cp /root/.ssh/* /sysroot/root/.ssh
chmod 700 /sysroot/root/.ssh
cp /etc/confluent/* /sysroot/etc/confluent/
cp /etc/ssh/*key* /sysroot/etc/ssh/
for pubkey in /etc/ssh/ssh_host*key.pub; do
certfile=${pubkey/.pub/-cert.pub}
privfile=${pubkey%.pub}
if [ -s $certfile ]; then
echo HostCertificate $certfile >> /sysroot/etc/ssh/sshd_config
fi
echo HostKey $privfile >> /sysroot/etc/ssh/sshd_config
done
mkdir -p /sysroot/dev /sysroot/sys /sysroot/proc /sysroot/run
if [ ! -z "$autocons" ]; then
autocons=${autocons%,*}
mkdir -p /run/systemd/generator/getty.target.wants
ln -s /usr/lib/systemd/system/serial-getty@.service /run/systemd/generator/getty.target.wants/serial-getty@${autocons}.service
fi
while [ ! -e /sysroot/sbin/init ]; do
echo "Failed to access root filesystem or it is missing /sbin/init"
echo "System should be accessible through ssh at port 2222 with the appropriate key"
while [ ! -e /sysroot/sbin/init ]; do
sleep 1
done
done
rootpassword=$(grep ^rootpassword: /etc/confluent/confluent.deploycfg)
rootpassword=${rootpassword#rootpassword: }
if [ "$rootpassword" = "null" ]; then
rootpassword=""
fi
if [ ! -z "$rootpassword" ]; then
sed -i "s@root:[^:]*:@root:$rootpassword:@" /sysroot/etc/shadow
fi
for i in /ssh/*.ca; do
echo '@cert-authority *' $(cat $i) >> /sysroot/etc/ssh/ssh_known_hosts
done
echo HostbasedAuthentication yes >> /sysroot/etc/ssh/sshd_config
echo HostbasedUsesNameFromPacketOnly yes >> /sysroot/etc/ssh/sshd_config
echo IgnoreRhosts no >> /sysroot/etc/ssh/sshd_config
sshconf=/sysroot/etc/ssh/ssh_config
if [ -d /sysroot/etc/ssh/ssh_config.d/ ]; then
sshconf=/sysroot/etc/ssh/ssh_config.d/01-confluent.conf
fi
echo 'Host *' >> $sshconf
echo ' HostbasedAuthentication yes' >> $sshconf
echo ' EnableSSHKeysign yes' >> $sshconf
echo ' HostbasedKeyTypes *ed25519*' >> $sshconf
curl -sf -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $(cat /etc/confluent/confluent.apikey)" https://$confluent_whost/confluent-api/self/nodelist > /sysroot/etc/ssh/shosts.equiv
cp /sysroot/etc/ssh/shosts.equiv /sysroot/root/.shosts
chmod 640 /sysroot/etc/ssh/*_key
cp /tls/*.pem /sysroot/etc/pki/ca-trust/source/anchors/
chroot /sysroot/ update-ca-trust
curl -sf https://$confluent_whost/confluent-public/os/$confluent_profile/scripts/onboot.service > /sysroot/etc/systemd/system/onboot.service
mkdir -p /sysroot/opt/confluent/bin
curl -sf https://$confluent_whost/confluent-public/os/$confluent_profile/scripts/onboot.sh > /sysroot/opt/confluent/bin/onboot.sh
chmod +x /sysroot/opt/confluent/bin/onboot.sh
cp /opt/confluent/bin/apiclient /sysroot/opt/confluent/bin
ln -s /etc/systemd/system/onboot.service /sysroot/etc/systemd/system/multi-user.target.wants/onboot.service
cp /etc/confluent/functions /sysroot/etc/confluent/functions
if grep installtodisk /proc/cmdline > /dev/null; then
. /etc/confluent/functions
run_remote installimage
exec reboot -f
fi
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
mv /lib/firmware /lib/firmware-ramfs
ln -s /sysroot/lib/firmware /lib/firmware
kill $(grep -l ^/usr/lib/systemd/systemd-udevd /proc/*/cmdline|cut -d/ -f 3)
if [ $TETHERED -eq 1 ]; then
(
sleep 86400 &
ONBOOTPID=$!
mkdir -p /run/confluent
echo $ONBOOTPID > /run/confluent/onboot_sleep.pid
wait $ONBOOTPID
dd if=/mnt/remoteimg/rootimg.sfs iflag=nocache count=0 >& /dev/null
rm -rf /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs /lib/firmware-ramfs /usr/lib64/libcrypto.so* /usr/lib64/systemd/ /kernel/ /usr/bin/ /usr/sbin/ /usr/libexec/
) &
while [ ! -f /run/confluent/onboot_sleep.pid ]; do
sleep 0.1
done
else
rm -rf /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs /lib/firmware-ramfs /usr/lib64/libcrypto.so* /usr/lib64/systemd/ /kernel/ /usr/bin/ /usr/sbin/ /usr/libexec/
fi
if grep debugssh /proc/cmdline >& /dev/null; then
exec /opt/confluent/bin/start_root
else
exec /opt/confluent/bin/start_root -s # share mount namespace, keep kernel callbacks intact
fi

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/installimage

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/onboot.custom

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/onboot.service

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/onboot.sh

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/post.sh

View File

@@ -0,0 +1 @@
../../../../el9-diskless/profiles/default/scripts/syncfileclient

View File

@@ -10,6 +10,13 @@ function test_mgr() {
return 1
}
function initconfluentscriptstmp() {
if [ -z "$confluentscripttmpdir" ]; then
mkdir -p /opt/confluent/tmpexec
confluentscripttmpdir=$(mktemp -d /opt/confluent/tmpexec/confluentscripts.XXXXXXXXX)
fi
}
function confluentpython() {
if [ -x /usr/libexec/platform-python ]; then
/usr/libexec/platform-python $*
@@ -53,6 +60,7 @@ function set_confluent_vars() {
if [ -z "$confluent_profile" ]; then
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
fi
export confluent_profile confluent_mgr nodename
}
fetch_remote() {
@@ -71,7 +79,8 @@ fetch_remote() {
}
source_remote_parts() {
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unset confluentscripttmpdir
initconfluentscriptstmp
apiclient=/opt/confluent/bin/apiclient
if [ -f /etc/confluent/apiclient ]; then
apiclient=/etc/confluent/apiclient
@@ -85,7 +94,8 @@ source_remote_parts() {
}
run_remote_parts() {
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unset confluentscripttmpdir
initconfluentscriptstmp
apiclient=/opt/confluent/bin/apiclient
if [ -f /etc/confluent/apiclient ]; then
apiclient=/etc/confluent/apiclient
@@ -104,10 +114,7 @@ source_remote() {
echo
echo '---------------------------------------------------------------------------'
echo Sourcing $1 from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
if [ -z "$confluentscripttmpdir" ]; then
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unsettmpdir=1
fi
initconfluentscriptstmp
echo Sourcing from $confluentscripttmpdir
cd $confluentscripttmpdir
fetch_remote $1
@@ -134,9 +141,9 @@ run_remote() {
echo '---------------------------------------------------------------------------'
echo Running $requestedcmd from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
if [ -z "$confluentscripttmpdir" ]; then
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unsettmpdir=1
fi
initconfluentscriptstmp
echo Executing in $confluentscripttmpdir
cd $confluentscripttmpdir
fetch_remote $1
@@ -169,7 +176,8 @@ run_remote_python() {
fi
echo '---------------------------------------------------------------------------'
echo Running python script "'$*'" from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unset confluentscripttmpdir
initconfluentscriptstmp
echo Executing in $confluentscripttmpdir
cd $confluentscripttmpdir
mkdir -p $(dirname $1)

View File

@@ -2,6 +2,9 @@
import subprocess
import os
class SilentException(Exception):
pass
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
@@ -24,9 +27,11 @@ class DiskInfo(object):
continue
k, v = prop.split('=', 1)
if k == 'DEVTYPE' and v != 'disk':
if v == 'partition':
raise SilentException('Partition')
raise Exception('Not a disk')
elif k == 'DM_NAME':
raise Exception('Device Mapper')
raise SilentException('Device Mapper')
elif k == 'ID_MODEL':
self.model = v
elif k == 'DEVPATH':
@@ -50,13 +55,17 @@ class DiskInfo(object):
self.driver = v.replace('"', '')
elif k == 'ATTRS{subsystype}':
self.subsystype = v.replace('"', '')
elif k == 'ATTR{ro}' and v == '"1"':
raise Exception("Device is read-only")
if not self.driver and 'imsm' not in self.mdcontainer and self.subsystype != 'nvm':
raise Exception("No driver detected")
if self.driver == 'sr':
raise Exception('cd/dvd')
if os.path.exists('/sys/block/{0}/size'.format(self.name)):
with open('/sys/block/{0}/size'.format(self.name), 'r') as sizesrc:
self.size = int(sizesrc.read()) * 512
if int(self.size) < 536870912:
raise Exception("Device too small for install")
if int(self.size) < 2147483648:
raise Exception("Device too small for install ({}MiB)".format(int(self.size)/1024/1024))
@property
def priority(self):
@@ -89,9 +98,11 @@ def main():
try:
disk = DiskInfo(disk)
disks.append(disk)
except SilentException:
pass
except Exception as e:
print("Skipping {0}: {1}".format(disk, str(e)))
nd = [x.name for x in sorted(disks, key=lambda x: x.priority)]
nd = [x.name for x in sorted(disks, key=lambda x: [x.priority, x.size])]
if nd:
open('/tmp/installdisk', 'w').write(nd[0])

View File

@@ -5,11 +5,37 @@
# noted below so custom commands are executed before
# the script notifies confluent that install is fully complete.
ntpsrvs=""
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
timedatectl set-timezone $(grep ^timezone: /etc/confluent/confluent.deploycfg|awk '{print $2}')
if grep ^ntpservers: /etc/confluent/confluent.deploycfg > /dev/null; then
for ntpsrv in $(sed -n '/^ntpservers:/,/^[^-]/p' /etc/confluent/confluent.deploycfg|sed 1d|sed '$d' | sed -e 's/^- //'); do
echo "server ${ntpsrv} iburst " >> /tmp/timeservers
done
fi
if [ -f /tmp/timeservers ]; then
ntpsrvs=$(cat /tmp/timeservers)
sed -i "1,/^pool * /c\\
${ntpsrvs//$'\n'/\\$'\n'}" /etc/chrony.conf
systemctl restart chronyd
rm -f /tmp/timeservers
fi
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
mkdir -p /var/log/confluent

View File

@@ -10,6 +10,13 @@ function test_mgr() {
return 1
}
function initconfluentscriptstmp() {
if [ -z "$confluentscripttmpdir" ]; then
mkdir -p /opt/confluent/tmpexec
confluentscripttmpdir=$(mktemp -d /opt/confluent/tmpexec/confluentscripts.XXXXXXXXX)
fi
}
function confluentpython() {
if [ -x /usr/libexec/platform-python ]; then
/usr/libexec/platform-python $*
@@ -53,6 +60,7 @@ function set_confluent_vars() {
if [ -z "$confluent_profile" ]; then
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
fi
export confluent_profile confluent_mgr nodename
}
fetch_remote() {
@@ -71,7 +79,8 @@ fetch_remote() {
}
source_remote_parts() {
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unset confluentscripttmpdir
initconfluentscriptstmp
apiclient=/opt/confluent/bin/apiclient
if [ -f /etc/confluent/apiclient ]; then
apiclient=/etc/confluent/apiclient
@@ -85,7 +94,8 @@ source_remote_parts() {
}
run_remote_parts() {
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unset confluentscripttmpdir
initconfluentscriptstmp
apiclient=/opt/confluent/bin/apiclient
if [ -f /etc/confluent/apiclient ]; then
apiclient=/etc/confluent/apiclient
@@ -104,10 +114,7 @@ source_remote() {
echo
echo '---------------------------------------------------------------------------'
echo Sourcing $1 from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
if [ -z "$confluentscripttmpdir" ]; then
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unsettmpdir=1
fi
initconfluentscriptstmp
echo Sourcing from $confluentscripttmpdir
cd $confluentscripttmpdir
fetch_remote $1
@@ -134,9 +141,9 @@ run_remote() {
echo '---------------------------------------------------------------------------'
echo Running $requestedcmd from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
if [ -z "$confluentscripttmpdir" ]; then
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unsettmpdir=1
fi
initconfluentscriptstmp
echo Executing in $confluentscripttmpdir
cd $confluentscripttmpdir
fetch_remote $1
@@ -169,7 +176,8 @@ run_remote_python() {
fi
echo '---------------------------------------------------------------------------'
echo Running python script "'$*'" from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unset confluentscripttmpdir
initconfluentscriptstmp
echo Executing in $confluentscripttmpdir
cd $confluentscripttmpdir
mkdir -p $(dirname $1)

View File

@@ -2,6 +2,9 @@
import subprocess
import os
class SilentException(Exception):
pass
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
@@ -24,9 +27,11 @@ class DiskInfo(object):
continue
k, v = prop.split('=', 1)
if k == 'DEVTYPE' and v != 'disk':
if v == 'partition':
raise SilentException('Partition')
raise Exception('Not a disk')
elif k == 'DM_NAME':
raise Exception('Device Mapper')
raise SilentException('Device Mapper')
elif k == 'ID_MODEL':
self.model = v
elif k == 'DEVPATH':
@@ -50,13 +55,17 @@ class DiskInfo(object):
self.driver = v.replace('"', '')
elif k == 'ATTRS{subsystype}':
self.subsystype = v.replace('"', '')
elif k == 'ATTR{ro}' and v == '"1"':
raise Exception("Device is read-only")
if not self.driver and 'imsm' not in self.mdcontainer and self.subsystype != 'nvm':
raise Exception("No driver detected")
if self.driver == 'sr':
raise Exception('cd/dvd')
if os.path.exists('/sys/block/{0}/size'.format(self.name)):
with open('/sys/block/{0}/size'.format(self.name), 'r') as sizesrc:
self.size = int(sizesrc.read()) * 512
if int(self.size) < 536870912:
raise Exception("Device too small for install")
if int(self.size) < 2147483648:
raise Exception("Device too small for install ({}MiB)".format(int(self.size)/1024/1024))
@property
def priority(self):
@@ -89,9 +98,11 @@ def main():
try:
disk = DiskInfo(disk)
disks.append(disk)
except SilentException:
pass
except Exception as e:
print("Skipping {0}: {1}".format(disk, str(e)))
nd = [x.name for x in sorted(disks, key=lambda x: x.priority)]
nd = [x.name for x in sorted(disks, key=lambda x: [x.priority, x.size])]
if nd:
open('/tmp/installdisk', 'w').write(nd[0])

View File

@@ -107,7 +107,11 @@ if [ ! -z "$confluentsrv" ]; then
/usr/libexec/nm-initrd-generator ip=:dhcp6
else
confluenthttpsrv=$confluentsrv
ifname=$(ip -br link|grep LOWER_UP|grep -v UNKNOWN|head -n 1|awk '{print $1}')
ifname=""
while [ -z "$ifname" ]; do
ifname=$(ip -br link|grep LOWER_UP|grep -v ib|grep -v UNKNOWN|head -n 1|awk '{print $1}')
sleep 0.5
done
echo -n "Attempting to use dhcp to bring up $ifname..."
dhclient $ifname
while ! ip -br addr show dev $ifname | grep \\. > /dev/null; do

View File

@@ -25,7 +25,8 @@ if [ ! -f /etc/confluent/firstboot.ran ]; then
touch /etc/confluent/firstboot.ran
cat /etc/confluent/tls/*.pem >> /etc/pki/tls/certs/ca-bundle.crt
confluentpython /root/confignet
rm /root/confignet
run_remote firstboot.custom
# Firstboot scripts may be placed into firstboot.d, e.g. firstboot.d/01-firstaction.sh, firstboot.d/02-secondaction.sh
run_remote_parts firstboot.d

View File

@@ -10,6 +10,13 @@ function test_mgr() {
return 1
}
function initconfluentscriptstmp() {
if [ -z "$confluentscripttmpdir" ]; then
mkdir -p /opt/confluent/tmpexec
confluentscripttmpdir=$(mktemp -d /opt/confluent/tmpexec/confluentscripts.XXXXXXXXX)
fi
}
function confluentpython() {
if [ -x /usr/libexec/platform-python ]; then
/usr/libexec/platform-python $*
@@ -53,6 +60,7 @@ function set_confluent_vars() {
if [ -z "$confluent_profile" ]; then
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg | sed -e 's/[^ ]*: //')
fi
export confluent_profile confluent_mgr nodename
}
fetch_remote() {
@@ -71,7 +79,8 @@ fetch_remote() {
}
source_remote_parts() {
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unset confluentscripttmpdir
initconfluentscriptstmp
apiclient=/opt/confluent/bin/apiclient
if [ -f /etc/confluent/apiclient ]; then
apiclient=/etc/confluent/apiclient
@@ -85,7 +94,8 @@ source_remote_parts() {
}
run_remote_parts() {
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unset confluentscripttmpdir
initconfluentscriptstmp
apiclient=/opt/confluent/bin/apiclient
if [ -f /etc/confluent/apiclient ]; then
apiclient=/etc/confluent/apiclient
@@ -104,10 +114,7 @@ source_remote() {
echo
echo '---------------------------------------------------------------------------'
echo Sourcing $1 from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
if [ -z "$confluentscripttmpdir" ]; then
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unsettmpdir=1
fi
initconfluentscriptstmp
echo Sourcing from $confluentscripttmpdir
cd $confluentscripttmpdir
fetch_remote $1
@@ -134,9 +141,9 @@ run_remote() {
echo '---------------------------------------------------------------------------'
echo Running $requestedcmd from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
if [ -z "$confluentscripttmpdir" ]; then
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unsettmpdir=1
fi
initconfluentscriptstmp
echo Executing in $confluentscripttmpdir
cd $confluentscripttmpdir
fetch_remote $1
@@ -169,7 +176,8 @@ run_remote_python() {
fi
echo '---------------------------------------------------------------------------'
echo Running python script "'$*'" from https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/
confluentscripttmpdir=$(mktemp -d /tmp/confluentscripts.XXXXXXXXX)
unset confluentscripttmpdir
initconfluentscriptstmp
echo Executing in $confluentscripttmpdir
cd $confluentscripttmpdir
mkdir -p $(dirname $1)

View File

@@ -2,6 +2,9 @@
import subprocess
import os
class SilentException(Exception):
pass
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
@@ -24,9 +27,11 @@ class DiskInfo(object):
continue
k, v = prop.split('=', 1)
if k == 'DEVTYPE' and v != 'disk':
if v == 'partition':
raise SilentException('Partition')
raise Exception('Not a disk')
elif k == 'DM_NAME':
raise Exception('Device Mapper')
raise SilentException('Device Mapper')
elif k == 'ID_MODEL':
self.model = v
elif k == 'DEVPATH':
@@ -50,13 +55,17 @@ class DiskInfo(object):
self.driver = v.replace('"', '')
elif k == 'ATTRS{subsystype}':
self.subsystype = v.replace('"', '')
elif k == 'ATTR{ro}' and v == '"1"':
raise Exception("Device is read-only")
if not self.driver and 'imsm' not in self.mdcontainer and self.subsystype != 'nvm':
raise Exception("No driver detected")
if self.driver == 'sr':
raise Exception('cd/dvd')
if os.path.exists('/sys/block/{0}/size'.format(self.name)):
with open('/sys/block/{0}/size'.format(self.name), 'r') as sizesrc:
self.size = int(sizesrc.read()) * 512
if int(self.size) < 536870912:
raise Exception("Device too small for install")
if int(self.size) < 2147483648:
raise Exception("Device too small for install ({}MiB)".format(int(self.size)/1024/1024))
@property
def priority(self):
@@ -89,9 +98,11 @@ def main():
try:
disk = DiskInfo(disk)
disks.append(disk)
except SilentException:
pass
except Exception as e:
print("Skipping {0}: {1}".format(disk, str(e)))
nd = [x.name for x in sorted(disks, key=lambda x: x.priority)]
nd = [x.name for x in sorted(disks, key=lambda x: [x.priority, x.size])]
if nd:
open('/tmp/installdisk', 'w').write(nd[0])

Some files were not shown because too many files have changed in this diff Show More