mirror of
https://github.com/xcat2/confluent.git
synced 2026-04-25 02:01:27 +00:00
Merge branch 'lenovo:master' into master
This commit is contained in:
@@ -8,6 +8,10 @@ import sys
|
||||
import time
|
||||
import shlex
|
||||
import subprocess
|
||||
try:
|
||||
import yaml
|
||||
except ImportError:
|
||||
pass
|
||||
try:
|
||||
from importlib.machinery import SourceFileLoader
|
||||
def load_source(mod, path):
|
||||
@@ -107,6 +111,89 @@ def get_interface_name(iname, settings):
|
||||
return iname
|
||||
return None
|
||||
|
||||
class NetplanManager(object):
|
||||
def __init__(self):
|
||||
self.cfgbydev = {}
|
||||
self.read_connections()
|
||||
|
||||
def read_connections(self):
|
||||
for plan in glob.glob('/etc/netplan/*.y*ml'):
|
||||
with open(plan) as planfile:
|
||||
planinfo = yaml.safe_load(planfile)
|
||||
if not planinfo:
|
||||
continue
|
||||
nicinfo = planinfo.get('network', {}).get('ethernets', {})
|
||||
for devname in nicinfo:
|
||||
if devname == 'lo':
|
||||
continue
|
||||
if 'gateway4' in nicinfo[devname]:
|
||||
# normalize deprecated syntax on read in
|
||||
gw4 = nicinfo[devname]['gateway4']
|
||||
del nicinfo[devname]['gateway4']
|
||||
routeinfo = nicinfo[devname].get('routes', [])
|
||||
for ri in routeinfo:
|
||||
if ri.get('via', None) == gw4 and ri.get('to', None) in ('default', '0.0.0.0/0', '0/0'):
|
||||
break
|
||||
else:
|
||||
routeinfo.append({
|
||||
'to': 'default',
|
||||
'via': gw4
|
||||
})
|
||||
nicinfo[devname]['routes'] = routeinfo
|
||||
self.cfgbydev[devname] = nicinfo[devname]
|
||||
|
||||
def apply_configuration(self, cfg):
|
||||
devnames = cfg['interfaces']
|
||||
if len(devnames) != 1:
|
||||
raise Exception('Multi-nic team/bonds not yet supported')
|
||||
stgs = cfg['settings']
|
||||
needcfgapply = False
|
||||
for devname in devnames:
|
||||
needcfgwrite = False
|
||||
if stgs['ipv6_method'] == 'static':
|
||||
curraddr = stgs['ipv6_address']
|
||||
currips = self.getcfgarrpath([devname, 'addresses'])
|
||||
if curraddr not in currips:
|
||||
needcfgwrite = True
|
||||
currips.append(curraddr)
|
||||
if stgs['ipv4_method'] == 'static':
|
||||
curraddr = stgs['ipv4_address']
|
||||
currips = self.getcfgarrpath([devname, 'addresses'])
|
||||
if curraddr not in currips:
|
||||
needcfgwrite = True
|
||||
currips.append(curraddr)
|
||||
gws = []
|
||||
gws.append(stgs.get('ipv4_gateway', None))
|
||||
gws.append(stgs.get('ipv6_gateway', None))
|
||||
for gwaddr in gws:
|
||||
if gwaddr:
|
||||
cfgroutes = self.getcfgarrpath([devname, 'routes'])
|
||||
for rinfo in cfgroutes:
|
||||
if rinfo.get('via', None) == gwaddr:
|
||||
break
|
||||
else:
|
||||
needcfgwrite = True
|
||||
cfgroutes.append({'via': gwaddr, 'to': 'default'})
|
||||
if needcfgwrite:
|
||||
needcfgapply = True
|
||||
newcfg = {'network': {'version': 2, 'ethernets': {devname: self.cfgbydev[devname]}}}
|
||||
with open('/etc/netplan/{0}-confluentcfg.yaml'.format(devname), 'w') as planout:
|
||||
planout.write(yaml.dump(newcfg))
|
||||
if needcfgapply:
|
||||
subprocess.call(['netplan', 'apply'])
|
||||
|
||||
def getcfgarrpath(self, devpath):
|
||||
currptr = self.cfgbydev
|
||||
for k in devpath[:-1]:
|
||||
if k not in currptr:
|
||||
currptr[k] = {}
|
||||
currptr = currptr[k]
|
||||
if devpath[-1] not in currptr:
|
||||
currptr[devpath[-1]] = []
|
||||
return currptr[devpath[-1]]
|
||||
|
||||
|
||||
|
||||
class WickedManager(object):
|
||||
def __init__(self):
|
||||
self.teamidx = 0
|
||||
@@ -360,6 +447,8 @@ if __name__ == '__main__':
|
||||
if not netname_to_interfaces['default']['interfaces']:
|
||||
del netname_to_interfaces['default']
|
||||
rm_tmp_llas(tmpllas)
|
||||
if os.path.exists('/usr/sbin/netplan'):
|
||||
nm = NetplanManager()
|
||||
if os.path.exists('/usr/bin/nmcli'):
|
||||
nm = NetworkManager(devtypes)
|
||||
elif os.path.exists('/usr/sbin/wicked'):
|
||||
|
||||
@@ -42,7 +42,7 @@ for os in rhvh4 el7 genesis el8 suse15 ubuntu18.04 ubuntu20.04 ubuntu22.04 coreo
|
||||
mv ../addons.cpio .
|
||||
cd ..
|
||||
done
|
||||
for os in el7 el8 suse15 el9 ubuntu20.04; do
|
||||
for os in el7 el8 suse15 el9 ubuntu20.04 ubuntu22.04; do
|
||||
mkdir ${os}disklessout
|
||||
cd ${os}disklessout
|
||||
if [ -d ../${os}bin ]; then
|
||||
|
||||
@@ -125,4 +125,6 @@ if grep installtodisk /proc/cmdline > /dev/null; then
|
||||
fi
|
||||
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
|
||||
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
|
||||
mv /lib/firmware /lib/firmware-ramfs
|
||||
ln -s /sysroot/lib/firmware /lib/firmware
|
||||
exec /opt/confluent/bin/start_root
|
||||
|
||||
@@ -127,5 +127,7 @@ if grep installtodisk /proc/cmdline > /dev/null; then
|
||||
fi
|
||||
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
|
||||
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
|
||||
mv /lib/firmware /lib/firmware-ramfs
|
||||
ln -s /sysroot/lib/firmware /lib/firmware
|
||||
kill $(grep -l ^/usr/lib/systemd/systemd-udevd /proc/*/cmdline|cut -d/ -f 3)
|
||||
exec /opt/confluent/bin/start_root
|
||||
|
||||
@@ -127,5 +127,7 @@ if grep installtodisk /proc/cmdline > /dev/null; then
|
||||
fi
|
||||
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
|
||||
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
|
||||
mv /lib/firmware /lib/firmware-ramfs
|
||||
ln -s /sysroot/lib/firmware /lib/firmware
|
||||
kill $(grep -l ^/usr/lib/systemd/systemd-udevd /proc/*/cmdline|cut -d/ -f 3)
|
||||
exec /opt/confluent/bin/start_root
|
||||
|
||||
@@ -138,4 +138,6 @@ if grep installtodisk /proc/cmdline > /dev/null; then
|
||||
fi
|
||||
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
|
||||
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
|
||||
mv /lib/firmware /lib/firmware-ramfs
|
||||
ln -s /sysroot/lib/firmware /lib/firmware
|
||||
exec /opt/confluent/bin/start_root
|
||||
|
||||
@@ -107,7 +107,7 @@ if [ "$v6meth" = static ]; then
|
||||
ip route add default via $v6gw
|
||||
fi
|
||||
fi
|
||||
v4meth=$(grep ^ipv6_method: /etc/confluent/confluent.deploycfg|awk '{print $2}')
|
||||
v4meth=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg|awk '{print $2}')
|
||||
if [ "$v4meth" = static ]; then
|
||||
v4addr=$(grep ^ipv4_address: /etc/confluent/confluent.deploycfg | awk '{print $2}')
|
||||
v4prefix=$(grep ^prefix: /etc/confluent/confluent.deploycfg | awk '{print $2}')
|
||||
|
||||
@@ -0,0 +1,11 @@
|
||||
[Unit]
|
||||
Description=First Boot Process
|
||||
Requires=network-online.target
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
ExecStart=/opt/confluent/bin/firstboot.sh
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
@@ -0,0 +1,40 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This script is executed on the first boot after install has
|
||||
# completed. It is best to edit the middle of the file as
|
||||
# noted below so custom commands are executed before
|
||||
# the script notifies confluent that install is fully complete.
|
||||
|
||||
HOME=$(getent passwd $(whoami)|cut -d: -f 6)
|
||||
export HOME
|
||||
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
|
||||
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
|
||||
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
|
||||
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
|
||||
export nodename confluent_mgr confluent_profile
|
||||
. /etc/confluent/functions
|
||||
(
|
||||
exec >> /var/log/confluent/confluent-firstboot.log
|
||||
exec 2>> /var/log/confluent/confluent-firstboot.log
|
||||
chmod 600 /var/log/confluent/confluent-firstboot.log
|
||||
while ! ping -c 1 $confluent_mgr >& /dev/null; do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
if [ ! -f /etc/confluent/firstboot.ran ]; then
|
||||
touch /etc/confluent/firstboot.ran
|
||||
|
||||
run_remote firstboot.custom
|
||||
# Firstboot scripts may be placed into firstboot.d, e.g. firstboot.d/01-firstaction.sh, firstboot.d/02-secondaction.sh
|
||||
run_remote_parts firstboot.d
|
||||
|
||||
# Induce execution of remote configuration, e.g. ansible plays in ansible/firstboot.d/
|
||||
run_remote_config firstboot.d
|
||||
fi
|
||||
|
||||
curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus
|
||||
systemctl disable firstboot
|
||||
rm /etc/systemd/system/firstboot.service
|
||||
rm /etc/confluent/firstboot.ran
|
||||
) &
|
||||
tail --pid $! -F /var/log/confluent/confluent-firstboot.log > /dev/console
|
||||
@@ -0,0 +1,93 @@
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
class DiskInfo(object):
|
||||
def __init__(self, devname):
|
||||
self.name = devname
|
||||
self.wwn = None
|
||||
self.path = None
|
||||
self.model = ''
|
||||
self.size = 0
|
||||
self.driver = None
|
||||
self.mdcontainer = ''
|
||||
devnode = '/dev/{0}'.format(devname)
|
||||
qprop = subprocess.check_output(
|
||||
['udevadm', 'info', '--query=property', devnode])
|
||||
if not isinstance(qprop, str):
|
||||
qprop = qprop.decode('utf8')
|
||||
for prop in qprop.split('\n'):
|
||||
if '=' not in prop:
|
||||
continue
|
||||
k, v = prop.split('=', 1)
|
||||
if k == 'DEVTYPE' and v != 'disk':
|
||||
raise Exception('Not a disk')
|
||||
elif k == 'DM_NAME':
|
||||
raise Exception('Device Mapper')
|
||||
elif k == 'ID_MODEL':
|
||||
self.model = v
|
||||
elif k == 'DEVPATH':
|
||||
self.path = v
|
||||
elif k == 'ID_WWN':
|
||||
self.wwn = v
|
||||
elif k == 'MD_CONTAINER':
|
||||
self.mdcontainer = v
|
||||
attrs = subprocess.check_output(['udevadm', 'info', '-a', devnode])
|
||||
if not isinstance(attrs, str):
|
||||
attrs = attrs.decode('utf8')
|
||||
for attr in attrs.split('\n'):
|
||||
if '==' not in attr:
|
||||
continue
|
||||
k, v = attr.split('==', 1)
|
||||
k = k.strip()
|
||||
if k == 'ATTRS{size}':
|
||||
self.size = v.replace('"', '')
|
||||
elif (k == 'DRIVERS' and not self.driver
|
||||
and v not in ('"sd"', '""')):
|
||||
self.driver = v.replace('"', '')
|
||||
if not self.driver and 'imsm' not in self.mdcontainer:
|
||||
raise Exception("No driver detected")
|
||||
if os.path.exists('/sys/block/{0}/size'.format(self.name)):
|
||||
with open('/sys/block/{0}/size'.format(self.name), 'r') as sizesrc:
|
||||
self.size = int(sizesrc.read()) * 512
|
||||
if int(self.size) < 536870912:
|
||||
raise Exception("Device too small for install")
|
||||
|
||||
@property
|
||||
def priority(self):
|
||||
if self.model.lower() in ('m.2 nvme 2-bay raid kit', 'thinksystem_m.2_vd', 'thinksystem m.2', 'thinksystem_m.2'):
|
||||
return 0
|
||||
if 'imsm' in self.mdcontainer:
|
||||
return 1
|
||||
if self.driver == 'ahci':
|
||||
return 2
|
||||
if self.driver.startswith('megaraid'):
|
||||
return 3
|
||||
if self.driver.startswith('mpt'):
|
||||
return 4
|
||||
return 99
|
||||
|
||||
def __repr__(self):
|
||||
return repr({
|
||||
'name': self.name,
|
||||
'path': self.path,
|
||||
'wwn': self.wwn,
|
||||
'driver': self.driver,
|
||||
'size': self.size,
|
||||
'model': self.model,
|
||||
})
|
||||
|
||||
|
||||
def main():
|
||||
disks = []
|
||||
for disk in sorted(os.listdir('/sys/class/block')):
|
||||
try:
|
||||
disk = DiskInfo(disk)
|
||||
disks.append(disk)
|
||||
except Exception as e:
|
||||
print("Skipping {0}: {1}".format(disk, str(e)))
|
||||
nd = [x.name for x in sorted(disks, key=lambda x: x.priority)]
|
||||
if nd:
|
||||
open('/tmp/installdisk', 'w').write(nd[0])
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@@ -0,0 +1,419 @@
|
||||
#!/usr/bin/python3
|
||||
import glob
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import shutil
|
||||
import socket
|
||||
import stat
|
||||
import struct
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
bootuuid = None
|
||||
|
||||
def get_next_part_meta(img, imgsize):
|
||||
if img.tell() == imgsize:
|
||||
return None
|
||||
pathlen = struct.unpack('!H', img.read(2))[0]
|
||||
mountpoint = img.read(pathlen).decode('utf8')
|
||||
jsonlen = struct.unpack('!I', img.read(4))[0]
|
||||
metadata = json.loads(img.read(jsonlen).decode('utf8'))
|
||||
img.seek(16, 1) # skip the two 64-bit values we don't use, they are in json
|
||||
nextlen = struct.unpack('!H', img.read(2))[0]
|
||||
img.seek(nextlen, 1) # skip filesystem type
|
||||
nextlen = struct.unpack('!H', img.read(2))[0]
|
||||
img.seek(nextlen, 1) # skip orig devname (redundant with json)
|
||||
nextlen = struct.unpack('!H', img.read(2))[0]
|
||||
img.seek(nextlen, 1) # skip padding
|
||||
nextlen = struct.unpack('!Q', img.read(8))[0]
|
||||
img.seek(nextlen, 1) # go to next section
|
||||
return metadata
|
||||
|
||||
def get_multipart_image_meta(img):
|
||||
img.seek(0, 2)
|
||||
imgsize = img.tell()
|
||||
img.seek(16)
|
||||
seekamt = img.read(1)
|
||||
img.seek(struct.unpack('B', seekamt)[0], 1)
|
||||
partinfo = get_next_part_meta(img, imgsize)
|
||||
while partinfo:
|
||||
yield partinfo
|
||||
partinfo = get_next_part_meta(img, imgsize)
|
||||
|
||||
def get_image_metadata(imgpath):
|
||||
with open(imgpath, 'rb') as img:
|
||||
header = img.read(16)
|
||||
if header == b'\x63\x7b\x9d\x26\xb7\xfd\x48\x30\x89\xf9\x11\xcf\x18\xfd\xff\xa1':
|
||||
for md in get_multipart_image_meta(img):
|
||||
yield md
|
||||
else:
|
||||
raise Exception('Installation from single part image not supported')
|
||||
|
||||
class PartedRunner():
|
||||
def __init__(self, disk):
|
||||
self.disk = disk
|
||||
|
||||
def run(self, command, check=True):
|
||||
command = command.split()
|
||||
command = ['parted', '-a', 'optimal', '-s', self.disk] + command
|
||||
if check:
|
||||
return subprocess.check_output(command).decode('utf8')
|
||||
else:
|
||||
return subprocess.run(command, stdout=subprocess.PIPE).stdout.decode('utf8')
|
||||
|
||||
def fixup(rootdir, vols):
|
||||
devbymount = {}
|
||||
for vol in vols:
|
||||
devbymount[vol['mount']] = vol['targetdisk']
|
||||
fstabfile = os.path.join(rootdir, 'etc/fstab')
|
||||
with open(fstabfile) as tfile:
|
||||
fstab = tfile.read().split('\n')
|
||||
while not fstab[0]:
|
||||
fstab = fstab[1:]
|
||||
if os.path.exists(os.path.join(rootdir, '.autorelabel')):
|
||||
os.unlink(os.path.join(rootdir, '.autorelabel'))
|
||||
with open(fstabfile, 'w') as tfile:
|
||||
for tab in fstab:
|
||||
entry = tab.split()
|
||||
if tab.startswith('#ORIGFSTAB#'):
|
||||
if entry[1] in devbymount:
|
||||
targetdev = devbymount[entry[1]]
|
||||
if targetdev.startswith('/dev/localstorage/'):
|
||||
entry[0] = targetdev
|
||||
else:
|
||||
uuid = subprocess.check_output(['blkid', '-s', 'UUID', '-o', 'value', targetdev]).decode('utf8')
|
||||
uuid = uuid.strip()
|
||||
entry[0] = 'UUID={}'.format(uuid)
|
||||
elif entry[2] == 'swap':
|
||||
entry[0] = '/dev/mapper/localstorage-swap'
|
||||
entry[0] = entry[0].ljust(42)
|
||||
entry[1] = entry[1].ljust(16)
|
||||
entry[3] = entry[3].ljust(28)
|
||||
tab = '\t'.join(entry)
|
||||
tfile.write(tab + '\n')
|
||||
with open(os.path.join(rootdir, 'etc/hostname'), 'w') as nameout:
|
||||
nameout.write(socket.gethostname() + '\n')
|
||||
selinuxconfig = os.path.join(rootdir, 'etc/selinux/config')
|
||||
policy = None
|
||||
if os.path.exists(selinuxconfig):
|
||||
with open(selinuxconfig) as cfgin:
|
||||
sec = cfgin.read().split('\n')
|
||||
for l in sec:
|
||||
l = l.split('#', 1)[0]
|
||||
if l.startswith('SELINUXTYPE='):
|
||||
_, policy = l.split('=')
|
||||
for sshkey in glob.glob(os.path.join(rootdir, 'etc/ssh/*_key*')):
|
||||
os.unlink(sshkey)
|
||||
for sshkey in glob.glob('/etc/ssh/*_key*'):
|
||||
newkey = os.path.join(rootdir, sshkey[1:])
|
||||
shutil.copy2(sshkey, newkey)
|
||||
finfo = os.stat(sshkey)
|
||||
os.chown(newkey, finfo[stat.ST_UID], finfo[stat.ST_GID])
|
||||
|
||||
# Will use confignet to handle networking for ubuntu
|
||||
shutil.rmtree(os.path.join(rootdir, 'etc/confluent/'))
|
||||
shutil.copytree('/etc/confluent', os.path.join(rootdir, 'etc/confluent'))
|
||||
if policy:
|
||||
sys.stdout.write('Applying SELinux labeling...')
|
||||
sys.stdout.flush()
|
||||
subprocess.check_call(['setfiles', '-r', rootdir, os.path.join(rootdir, 'etc/selinux/{}/contexts/files/file_contexts'.format(policy)), os.path.join(rootdir, 'etc')])
|
||||
subprocess.check_call(['setfiles', '-r', rootdir, os.path.join(rootdir, 'etc/selinux/{}/contexts/files/file_contexts'.format(policy)), os.path.join(rootdir, 'opt')])
|
||||
sys.stdout.write('Done\n')
|
||||
sys.stdout.flush()
|
||||
for metafs in ('proc', 'sys', 'dev'):
|
||||
subprocess.check_call(['mount', '-o', 'bind', '/{}'.format(metafs), os.path.join(rootdir, metafs)])
|
||||
if os.path.exists(os.path.join(rootdir, 'etc/lvm/devices/system.devices')):
|
||||
os.remove(os.path.join(rootdir, 'etc/lvm/devices/system.devices'))
|
||||
grubsyscfg = os.path.join(rootdir, 'etc/sysconfig/grub')
|
||||
if not os.path.exists(grubsyscfg):
|
||||
grubsyscfg = os.path.join(rootdir, 'etc/default/grub')
|
||||
with open(grubsyscfg) as defgrubin:
|
||||
defgrub = defgrubin.read().split('\n')
|
||||
with open(grubsyscfg, 'w') as defgrubout:
|
||||
for gline in defgrub:
|
||||
gline = gline.split()
|
||||
newline = []
|
||||
for ent in gline:
|
||||
if ent.startswith('resume=') or ent.startswith('rd.lvm.lv'):
|
||||
continue
|
||||
newline.append(ent)
|
||||
defgrubout.write(' '.join(newline) + '\n')
|
||||
grubcfg = subprocess.check_output(['find', os.path.join(rootdir, 'boot'), '-name', 'grub.cfg']).decode('utf8').strip().replace(rootdir, '/').replace('//', '/')
|
||||
grubcfg = grubcfg.split('\n')
|
||||
if not grubcfg[-1]:
|
||||
grubcfg = grubcfg[:-1]
|
||||
if len(grubcfg) == 1:
|
||||
grubcfg = grubcfg[0]
|
||||
else:
|
||||
for gcfg in grubcfg:
|
||||
rgcfg = os.path.join(rootdir, gcfg[1:]) # gcfg has a leading / to get rid of
|
||||
if os.stat(rgcfg).st_size > 256:
|
||||
grubcfg = gcfg
|
||||
else:
|
||||
with open(rgcfg, 'r') as gin:
|
||||
tgrubcfg = gin.read()
|
||||
tgrubcfg = tgrubcfg.split('\n')
|
||||
if 'search --no-floppy --fs-uuid --set=dev' in tgrubcfg[0]:
|
||||
tgrubcfg[0] = 'search --no-floppy --fs-uuid --set=dev ' + bootuuid
|
||||
elif 'search.fs_uuid ' in tgrubcfg[0] and 'root' in tgrubcfg[0]:
|
||||
tgrubcfg[0] = 'search.fs_uuid ' + bootuuid + ' root'
|
||||
with open(rgcfg, 'w') as gout:
|
||||
for gcline in tgrubcfg:
|
||||
gout.write(gcline)
|
||||
gout.write('\n')
|
||||
try:
|
||||
# must fixup root@d2:/boot/efi/EFI# cat ubuntu/grub.cfg ... uuid
|
||||
subprocess.check_call(['chroot', rootdir, 'grub-mkconfig', '-o', grubcfg])
|
||||
except Exception as e:
|
||||
print(repr(e))
|
||||
print(rootdir)
|
||||
print(grubcfg)
|
||||
time.sleep(86400)
|
||||
newroot = None
|
||||
with open('/etc/shadow') as shadowin:
|
||||
shents = shadowin.read().split('\n')
|
||||
for shent in shents:
|
||||
shent = shent.split(':')
|
||||
if not shent:
|
||||
continue
|
||||
if shent[0] == 'root' and shent[1] not in ('*', '!!', ''):
|
||||
newroot = shent[1]
|
||||
if newroot:
|
||||
shlines = None
|
||||
with open(os.path.join(rootdir, 'etc/shadow')) as oshadow:
|
||||
shlines = oshadow.read().split('\n')
|
||||
with open(os.path.join(rootdir, 'etc/shadow'), 'w') as oshadow:
|
||||
for line in shlines:
|
||||
if line.startswith('root:'):
|
||||
line = line.split(':')
|
||||
line[1] = newroot
|
||||
line = ':'.join(line)
|
||||
oshadow.write(line + '\n')
|
||||
partnum = None
|
||||
targblock = None
|
||||
for vol in vols:
|
||||
if vol['mount'] == '/boot/efi':
|
||||
targdev = vol['targetdisk']
|
||||
partnum = re.search('(\d+)$', targdev).group(1)
|
||||
targblock = re.search('(.*)\d+$', targdev).group(1)
|
||||
if targblock:
|
||||
shimpath = subprocess.check_output(['find', os.path.join(rootdir, 'boot/efi'), '-name', 'shimx64.efi']).decode('utf8').strip()
|
||||
shimpath = shimpath.replace(rootdir, '/').replace('/boot/efi', '').replace('//', '/').replace('/', '\\')
|
||||
subprocess.check_call(['efibootmgr', '-c', '-d', targblock, '-l', shimpath, '--part', partnum])
|
||||
#other network interfaces
|
||||
|
||||
|
||||
def had_swap():
|
||||
with open('/etc/fstab') as tabfile:
|
||||
tabs = tabfile.read().split('\n')
|
||||
for tab in tabs:
|
||||
tab = tab.split()
|
||||
if len(tab) < 3:
|
||||
continue
|
||||
if tab[2] == 'swap':
|
||||
return True
|
||||
return False
|
||||
|
||||
def install_to_disk(imgpath):
|
||||
global bootuuid
|
||||
lvmvols = {}
|
||||
deftotsize = 0
|
||||
mintotsize = 0
|
||||
deflvmsize = 0
|
||||
minlvmsize = 0
|
||||
biggestsize = 0
|
||||
biggestfs = None
|
||||
plainvols = {}
|
||||
allvols = []
|
||||
swapsize = 0
|
||||
if had_swap():
|
||||
with open('/proc/meminfo') as meminfo:
|
||||
swapsize = meminfo.read().split('\n')[0]
|
||||
swapsize = int(swapsize.split()[1])
|
||||
if swapsize < 2097152:
|
||||
swapsize = swapsize * 2
|
||||
elif swapsize > 8388608 and swapsize < 67108864:
|
||||
swapsize = swapsize * 0.5
|
||||
elif swapsize >= 67108864:
|
||||
swapsize = 33554432
|
||||
swapsize = int(swapsize * 1024)
|
||||
deftotsize = swapsize
|
||||
mintotsize = swapsize
|
||||
for fs in get_image_metadata(imgpath):
|
||||
allvols.append(fs)
|
||||
deftotsize += fs['initsize']
|
||||
mintotsize += fs['minsize']
|
||||
if fs['initsize'] > biggestsize:
|
||||
biggestfs = fs
|
||||
biggestsize = fs['initsize']
|
||||
if fs['device'].startswith('/dev/mapper'):
|
||||
lvmvols[fs['device'].replace('/dev/mapper/', '')] = fs
|
||||
deflvmsize += fs['initsize']
|
||||
minlvmsize += fs['minsize']
|
||||
else:
|
||||
plainvols[int(re.search('(\d+)$', fs['device'])[0])] = fs
|
||||
with open('/tmp/installdisk') as diskin:
|
||||
instdisk = diskin.read()
|
||||
instdisk = '/dev/' + instdisk
|
||||
parted = PartedRunner(instdisk)
|
||||
dinfo = parted.run('unit s print', check=False)
|
||||
dinfo = dinfo.split('\n')
|
||||
sectors = 0
|
||||
sectorsize = 0
|
||||
for inf in dinfo:
|
||||
if inf.startswith('Disk {0}:'.format(instdisk)):
|
||||
_, sectors = inf.split(': ')
|
||||
sectors = int(sectors.replace('s', ''))
|
||||
if inf.startswith('Sector size (logical/physical):'):
|
||||
_, sectorsize = inf.split(':')
|
||||
sectorsize = sectorsize.split('/')[0]
|
||||
sectorsize = sectorsize.replace('B', '')
|
||||
sectorsize = int(sectorsize)
|
||||
# for now, only support resizing/growing the largest partition
|
||||
minexcsize = deftotsize - biggestfs['initsize']
|
||||
mintotsize = deftotsize - biggestfs['initsize'] + biggestfs['minsize']
|
||||
minsectors = mintotsize // sectorsize
|
||||
if sectors < (minsectors + 65536):
|
||||
raise Exception('Disk too small to fit image')
|
||||
biggestsectors = sectors - (minexcsize // sectorsize)
|
||||
biggestsize = sectorsize * biggestsectors
|
||||
parted.run('mklabel gpt')
|
||||
curroffset = 2048
|
||||
for volidx in sorted(plainvols):
|
||||
vol = plainvols[volidx]
|
||||
if vol is not biggestfs:
|
||||
size = vol['initsize'] // sectorsize
|
||||
else:
|
||||
size = biggestsize // sectorsize
|
||||
size += 2047 - (size % 2048)
|
||||
end = curroffset + size
|
||||
if end > sectors:
|
||||
end = sectors
|
||||
parted.run('mkpart primary {}s {}s'.format(curroffset, end))
|
||||
vol['targetdisk'] = instdisk + '{0}'.format(volidx)
|
||||
curroffset += size + 1
|
||||
if not lvmvols:
|
||||
if swapsize:
|
||||
swapsize = swapsize // sectorsize
|
||||
swapsize += 2047 - (size % 2048)
|
||||
end = curroffset + swapsize
|
||||
if end > sectors:
|
||||
end = sectors
|
||||
parted.run('mkpart swap {}s {}s'.format(curroffset, end))
|
||||
subprocess.check_call(['mkswap', instdisk + '{}'.format(volidx + 1)])
|
||||
else:
|
||||
parted.run('mkpart lvm {}s 100%'.format(curroffset))
|
||||
lvmpart = instdisk + '{}'.format(volidx + 1)
|
||||
subprocess.check_call(['pvcreate', '-ff', '-y', lvmpart])
|
||||
subprocess.check_call(['vgcreate', 'localstorage', lvmpart])
|
||||
vginfo = subprocess.check_output(['vgdisplay', 'localstorage', '--units', 'b']).decode('utf8')
|
||||
vginfo = vginfo.split('\n')
|
||||
pesize = 0
|
||||
pes = 0
|
||||
for infline in vginfo:
|
||||
infline = infline.split()
|
||||
if len(infline) >= 3 and infline[:2] == ['PE', 'Size']:
|
||||
pesize = int(infline[2])
|
||||
if len(infline) >= 5 and infline[:2] == ['Free', 'PE']:
|
||||
pes = int(infline[4])
|
||||
takeaway = swapsize // pesize
|
||||
for volidx in lvmvols:
|
||||
vol = lvmvols[volidx]
|
||||
if vol is biggestfs:
|
||||
continue
|
||||
takeaway += vol['initsize'] // pesize
|
||||
takeaway += 1
|
||||
biggestextents = pes - takeaway
|
||||
for volidx in lvmvols:
|
||||
vol = lvmvols[volidx]
|
||||
if vol is biggestfs:
|
||||
extents = biggestextents
|
||||
else:
|
||||
extents = vol['initsize'] // pesize
|
||||
extents += 1
|
||||
if vol['mount'] == '/':
|
||||
lvname = 'root'
|
||||
else:
|
||||
lvname = vol['mount'].replace('/', '_')
|
||||
subprocess.check_call(['lvcreate', '-l', '{}'.format(extents), '-y', '-n', lvname, 'localstorage'])
|
||||
vol['targetdisk'] = '/dev/localstorage/{}'.format(lvname)
|
||||
if swapsize:
|
||||
subprocess.check_call(['lvcreate', '-y', '-l', '{}'.format(swapsize // pesize), '-n', 'swap', 'localstorage'])
|
||||
subprocess.check_call(['mkswap', '/dev/localstorage/swap'])
|
||||
os.makedirs('/run/imginst/targ')
|
||||
for vol in allvols:
|
||||
with open(vol['targetdisk'], 'wb') as partition:
|
||||
partition.write(b'\x00' * 1 * 1024 * 1024)
|
||||
subprocess.check_call(['mkfs.{}'.format(vol['filesystem']), vol['targetdisk']])
|
||||
subprocess.check_call(['mount', vol['targetdisk'], '/run/imginst/targ'])
|
||||
source = vol['mount'].replace('/', '_')
|
||||
source = '/run/imginst/sources/' + source
|
||||
blankfsstat = os.statvfs('/run/imginst/targ')
|
||||
blankused = (blankfsstat.f_blocks - blankfsstat.f_bfree) * blankfsstat.f_bsize
|
||||
sys.stdout.write('\nWriting {0}: '.format(vol['mount']))
|
||||
with subprocess.Popen(['cp', '-ax', source + '/.', '/run/imginst/targ']) as copier:
|
||||
stillrunning = copier.poll()
|
||||
lastprogress = 0.0
|
||||
while stillrunning is None:
|
||||
currfsstat = os.statvfs('/run/imginst/targ')
|
||||
currused = (currfsstat.f_blocks - currfsstat.f_bfree) * currfsstat.f_bsize
|
||||
currused -= blankused
|
||||
with open('/proc/meminfo') as meminf:
|
||||
for line in meminf.read().split('\n'):
|
||||
if line.startswith('Dirty:'):
|
||||
_, dirty, _ = line.split()
|
||||
dirty = int(dirty) * 1024
|
||||
progress = (currused - dirty) / vol['minsize']
|
||||
if progress < lastprogress:
|
||||
progress = lastprogress
|
||||
if progress > 0.99:
|
||||
progress = 0.99
|
||||
lastprogress = progress
|
||||
progress = progress * 100
|
||||
sys.stdout.write('\x1b[1K\rWriting {0}: {1:3.2f}%'.format(vol['mount'], progress))
|
||||
sys.stdout.flush()
|
||||
time.sleep(0.5)
|
||||
stillrunning = copier.poll()
|
||||
if stillrunning != 0:
|
||||
raise Exception("Error copying volume")
|
||||
with subprocess.Popen(['sync']) as syncrun:
|
||||
stillrunning = syncrun.poll()
|
||||
while stillrunning is None:
|
||||
with open('/proc/meminfo') as meminf:
|
||||
for line in meminf.read().split('\n'):
|
||||
if line.startswith('Dirty:'):
|
||||
_, dirty, _ = line.split()
|
||||
dirty = int(dirty) * 1024
|
||||
progress = (vol['minsize'] - dirty) / vol['minsize']
|
||||
if progress < lastprogress:
|
||||
progress = lastprogress
|
||||
if progress > 0.99:
|
||||
progress = 0.99
|
||||
lastprogress = progress
|
||||
progress = progress * 100
|
||||
sys.stdout.write('\x1b[1K\rWriting {0}: {1:3.2f}%'.format(vol['mount'], progress))
|
||||
sys.stdout.flush()
|
||||
time.sleep(0.5)
|
||||
stillrunning = syncrun.poll()
|
||||
sys.stdout.write('\x1b[1K\rDone writing {0}'.format(vol['mount']))
|
||||
sys.stdout.write('\n')
|
||||
sys.stdout.flush()
|
||||
if vol['mount'] == '/boot':
|
||||
tbootuuid = subprocess.check_output(['blkid', vol['targetdisk']])
|
||||
if b'UUID="' in tbootuuid:
|
||||
bootuuid = tbootuuid.split(b'UUID="', 1)[1].split(b'"')[0].decode('utf8')
|
||||
|
||||
|
||||
|
||||
|
||||
subprocess.check_call(['umount', '/run/imginst/targ'])
|
||||
for vol in allvols:
|
||||
subprocess.check_call(['mount', vol['targetdisk'], '/run/imginst/targ/' + vol['mount']])
|
||||
fixup('/run/imginst/targ', allvols)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
install_to_disk(os.environ['mountsrc'])
|
||||
|
||||
@@ -127,11 +127,13 @@ chmod +x /sysroot/opt/confluent/bin/onboot.sh
|
||||
cp /opt/confluent/bin/apiclient /sysroot/opt/confluent/bin
|
||||
ln -s /etc/systemd/system/onboot.service /sysroot/etc/systemd/system/multi-user.target.wants/onboot.service
|
||||
cp /etc/confluent/functions /sysroot/etc/confluent/functions
|
||||
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
|
||||
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
|
||||
mv /lib/firmware /lib/firmware-ramfs
|
||||
ln -s /sysroot/lib/firmware /lib/firmware
|
||||
if grep installtodisk /proc/cmdline > /dev/null; then
|
||||
. /etc/confluent/functions
|
||||
run_remote installimage
|
||||
exec reboot -f
|
||||
fi
|
||||
mv /lib/modules/$(uname -r) /lib/modules/$(uname -r)-ramfs
|
||||
ln -s /sysroot/lib/modules/$(uname -r) /lib/modules/
|
||||
exec /opt/confluent/bin/start_root
|
||||
|
||||
@@ -0,0 +1,48 @@
|
||||
#!/bin/bash
|
||||
. /etc/confluent/functions
|
||||
# the image will be used to deploy itself
|
||||
# provide both access to image (for parsing metadata)
|
||||
# and existing mounts of image (to take advantage of caching)
|
||||
mount -o bind /sys /sysroot/sys
|
||||
mount -o bind /dev /sysroot/dev
|
||||
mount -o bind /proc /sysroot/proc
|
||||
mount -o bind /run /sysroot/run
|
||||
|
||||
|
||||
if [ ! -f /tmp/mountparts.sh ]; then
|
||||
mkdir -p /sysroot/run/imginst/sources/_
|
||||
mount -o bind /mnt/remote /sysroot/run/imginst/sources/_
|
||||
else
|
||||
for srcmount in $(cat /tmp/mountparts.sh | awk '{print $2}'); do
|
||||
srcname=${srcmount#/dev/mapper/mproot}
|
||||
srcdir=$(echo $srcmount | sed -e 's!/dev/mapper/mproot!/mnt/remote!' -e 's!_!/!g')
|
||||
mkdir -p /sysroot/run/imginst/sources/$srcname
|
||||
mount -o bind $srcdir /sysroot/run/imginst/sources/$srcname
|
||||
done
|
||||
fi
|
||||
cd /sysroot/run
|
||||
chroot /sysroot/ bash -c "source /etc/confluent/functions; run_remote_python getinstalldisk"
|
||||
chroot /sysroot/ bash -c "source /etc/confluent/functions; run_remote_parts pre.d"
|
||||
if [ ! -f /sysroot/tmp/installdisk ]; then
|
||||
echo 'Unable to find a suitable installation target device, ssh to port 2222 to investigate'
|
||||
while [ ! -f /sysroot/tmp/installdisk ]; do
|
||||
sleep 1
|
||||
done
|
||||
fi
|
||||
lvm vgchange -a n
|
||||
udevadm control -e
|
||||
if [ -f /sysroot/etc/lvm/devices/system.devices ]; then
|
||||
rm /sysroot/etc/lvm/devices/system.devices
|
||||
fi
|
||||
chroot /sysroot /usr/lib/systemd/systemd-udevd --daemon
|
||||
chroot /sysroot bash -c "source /etc/confluent/functions; run_remote_python image2disk.py"
|
||||
echo "Port 22" >> /etc/ssh/sshd_config
|
||||
echo 'Match LocalPort 22' >> /etc/ssh/sshd_config
|
||||
echo ' ChrootDirectory /sysroot/run/imginst/targ' >> /etc/ssh/sshd_config
|
||||
kill -HUP $(cat /run/sshd.pid)
|
||||
cat /tls/*.pem > /sysroot/run/imginst/targ/usr/local/share/ca-certificates/confluent.crt
|
||||
chroot /sysroot/run/imginst/targ update-ca-certificates
|
||||
|
||||
chroot /sysroot/run/imginst/targ bash -c "source /etc/confluent/functions; run_remote post.sh"
|
||||
chroot /sysroot bash -c "umount \$(tac /proc/mounts|awk '{print \$2}'|grep ^/run/imginst/targ)"
|
||||
|
||||
@@ -0,0 +1,37 @@
|
||||
#!/bin/bash

# This script is executed 'chrooted' into a cloned disk target before rebooting
#

# Gather node identity and deployment coordinates from the confluent
# metadata dropped into /etc/confluent during deployment.
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions

# Send all output of this script to a root-only logfile, and mirror the
# log to the console for the duration of the run.
mkdir -p /var/log/confluent
chmod 700 /var/log/confluent
exec >> /var/log/confluent/confluent-post.log
exec 2>> /var/log/confluent/confluent-post.log
chmod 600 /var/log/confluent/confluent-post.log
tail -f /var/log/confluent/confluent-post.log > /dev/console &
logshowpid=$!

# Fetch the firstboot unit and script from the deployment server and
# enable it so final setup runs on the first boot of the installed system.
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.service > /etc/systemd/system/firstboot.service
mkdir -p /opt/confluent/bin
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /opt/confluent/bin/firstboot.sh
chmod +x /opt/confluent/bin/firstboot.sh
systemctl enable firstboot

# Run profile-provided post-install steps; run_remote* helpers come from
# /etc/confluent/functions sourced above.
run_remote_python syncfileclient
run_remote_python confignet
run_remote post.custom
# post scripts may be placed into post.d, e.g. post.d/01-firstaction.sh, post.d/02-secondaction.sh
run_remote_parts post.d

# Induce execution of remote configuration, e.g. ansible plays in ansible/post.d/
run_remote_config post.d

# Report to the deployment server that this node has reached 'staged'.
curl -sf -X POST -d 'status: staged' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus

# Stop mirroring the log to the console.
kill $logshowpid
|
||||
|
||||
|
||||
@@ -0,0 +1,286 @@
|
||||
#!/usr/bin/python3
|
||||
import subprocess
|
||||
import importlib
|
||||
import tempfile
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import pwd
|
||||
import grp
|
||||
from importlib.machinery import SourceFileLoader
|
||||
try:
|
||||
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
|
||||
except FileNotFoundError:
|
||||
apiclient = SourceFileLoader('apiclient', '/etc/confluent/apiclient').load_module()
|
||||
|
||||
|
||||
def partitionhostsline(line):
    """Split one /etc/hosts line into its components.

    Returns a tuple ``(ipaddr, names, comment)`` where ``ipaddr`` is the
    address field ('' for blank/comment-only lines), ``names`` is the list
    of hostnames following it, and ``comment`` is the trailing '#...' text
    (including the '#'), or '' when there is none.
    """
    comment = ''
    try:
        cmdidx = line.index('#')
        comment = line[cmdidx:]
        line = line[:cmdidx]
    except ValueError:
        pass  # no comment on this line
    # Strip unconditionally: previously a whitespace-only line without a
    # '#' was never stripped and crashed the unpacking below.
    line = line.strip()
    if not line:
        return '', [], comment
    # split() handles any run of whitespace; a line holding only an IP
    # address (no hostnames) now yields an empty name list instead of
    # raising ValueError.
    fields = line.split()
    return fields[0], fields[1:], comment
|
||||
|
||||
class HostMerger(object):
    """Merge a deployment-supplied hosts file into a local hosts file.

    Source entries are authoritative: any target line whose IP address or
    any of whose hostnames also appears in the source data is replaced by
    the corresponding source line (once), and source lines that matched
    nothing are appended at the end by write_out().
    """

    def __init__(self):
        self.byip = {}      # ip address -> index into self.sourcelines
        self.byname = {}    # hostname -> index into self.sourcelines
        self.sourcelines = []
        self.targlines = []

    def read_source(self, sourcefile):
        """Load the authoritative hosts data and index it by IP and name."""
        with open(sourcefile, 'r') as hfile:
            self.sourcelines = hfile.read().split('\n')
        # Trim trailing blank lines.  The emptiness guard fixes an
        # IndexError the unguarded loop raised on a completely empty file.
        while self.sourcelines and not self.sourcelines[-1]:
            self.sourcelines = self.sourcelines[:-1]
        for x in range(len(self.sourcelines)):
            line = self.sourcelines[x]
            currip, names, comment = partitionhostsline(line)
            if currip:
                self.byip[currip] = x
            for name in names:
                self.byname[name] = x

    def read_target(self, targetfile):
        """Read the current hosts file, substituting source entries for any
        line that collides with the source data by IP or by hostname."""
        with open(targetfile, 'r') as hfile:
            lines = hfile.read().split('\n')
        if lines and not lines[-1]:
            lines = lines[:-1]
        for y in range(len(lines)):
            line = lines[y]
            currip, names, comment = partitionhostsline(line)
            if currip in self.byip:
                x = self.byip[currip]
                if self.sourcelines[x] is None:
                    # have already consumed this entry
                    continue
                self.targlines.append(self.sourcelines[x])
                self.sourcelines[x] = None
                continue
            for name in names:
                if name in self.byname:
                    x = self.byname[name]
                    if self.sourcelines[x] is None:
                        # already consumed; drop the colliding target line
                        break
                    self.targlines.append(self.sourcelines[x])
                    self.sourcelines[x] = None
                    break
            else:
                # No collision with source data: keep the target line as-is.
                self.targlines.append(line)

    def write_out(self, targetfile):
        """Write merged target lines followed by unconsumed source lines."""
        # Drop trailing blank (or consumed, i.e. None) entries from both
        # lists.  The emptiness guards fix IndexError crashes the original
        # trim loops hit when either list was empty.
        while self.targlines and not self.targlines[-1]:
            self.targlines = self.targlines[:-1]
        while self.sourcelines and not self.sourcelines[-1]:
            self.sourcelines = self.sourcelines[:-1]
        with open(targetfile, 'w') as hosts:
            for line in self.targlines:
                hosts.write(line + '\n')
            for line in self.sourcelines:
                # None marks source lines already emitted via read_target().
                if line is not None:
                    hosts.write(line + '\n')
|
||||
|
||||
|
||||
class CredMerger:
    """Merge deployment-provided passwd/group entries into local files.

    Local "system" accounts (ids outside the login.defs UID/GID range)
    are preserved; accounts inside the range come from the deployment
    source.  Shadow files are rebuilt to match, with placeholder entries
    for source accounts that have no shadow line.

    NOTE(review): read_passwd/read_group must be called (source first,
    then target) before write_out; write_out relies on self.targdata and
    self.sourcedata existing.
    """
    def __init__(self):
        # Parse /etc/login.defs to learn the regular-user UID/GID ranges;
        # fall back to the common defaults (1000..60000) when absent.
        try:
            with open('/etc/login.defs', 'r') as ldefs:
                defs = ldefs.read().split('\n')
        except FileNotFoundError:
            defs = []
        lkup = {}
        self.discardnames = {}   # local names superseded by source entries
        self.shadowednames = {}  # names already present in the shadow file
        for line in defs:
            # Strip trailing comments before tokenizing.
            try:
                line = line[:line.index('#')]
            except ValueError:
                pass
            keyval = line.split()
            if len(keyval) < 2:
                continue
            lkup[keyval[0]] = keyval[1]
        self.uidmin = int(lkup.get('UID_MIN', 1000))
        self.uidmax = int(lkup.get('UID_MAX', 60000))
        self.gidmin = int(lkup.get('GID_MIN', 1000))
        self.gidmax = int(lkup.get('GID_MAX', 60000))
        self.shadowlines = None  # populated lazily by read_shadow()

    def read_passwd(self, source, targfile=False):
        """Read a passwd-format file; targfile=True marks the local file."""
        self.read_generic(source, self.uidmin, self.uidmax, targfile)

    def read_group(self, source, targfile=False):
        """Read a group-format file; targfile=True marks the local file."""
        self.read_generic(source, self.gidmin, self.gidmax, targfile)

    def read_generic(self, source, minid, maxid, targfile):
        """Partition colon-delimited account lines by id range.

        For the target (local) file, keep only out-of-range (system)
        entries and record in-range names as discarded.  For the source
        file, keep in-range entries plus NIS/compat markers.
        """
        if targfile:
            self.targdata = []
        else:
            self.sourcedata = []
        with open(source, 'r') as inputfile:
            for line in inputfile.read().split('\n'):
                try:
                    name, _, uid, _ = line.split(':', 3)
                    uid = int(uid)
                except ValueError:
                    # Malformed or non-numeric-id line; skip it.
                    continue
                if targfile:
                    if uid < minid or uid > maxid:
                        self.targdata.append(line)
                    else:
                        self.discardnames[name] = 1
                else:
                    if name[0] in ('+', '#', '@'):
                        # NIS/compat-style entries pass through regardless of id.
                        self.sourcedata.append(line)
                    elif uid >= minid and uid <= maxid:
                        self.sourcedata.append(line)

    def read_shadow(self, source):
        """Load shadow/gshadow lines, dropping entries for discarded names.

        Missing file is tolerated (shadowlines stays an empty list).
        """
        self.shadowlines = []
        try:
            with open(source, 'r') as inshadow:
                for line in inshadow.read().split('\n'):
                    try:
                        name, _ = line.split(':' , 1)
                    except ValueError:
                        continue
                    if name in self.discardnames:
                        continue
                    self.shadowednames[name] = 1
                    self.shadowlines.append(line)
        except FileNotFoundError:
            return

    def write_out(self, outfile):
        """Write merged entries, and rebuild the matching shadow file.

        When writing /etc/passwd (or /etc/group), the corresponding
        /etc/shadow (or /etc/gshadow) is rewritten: surviving shadow lines
        are kept, and locked placeholder entries are appended for source
        accounts that had none.
        """
        with open(outfile, 'w') as targ:
            for line in self.targdata:
                targ.write(line + '\n')
            for line in self.sourcedata:
                targ.write(line + '\n')
        if outfile == '/etc/passwd':
            if self.shadowlines is None:
                self.read_shadow('/etc/shadow')
            with open('/etc/shadow', 'w') as shadout:
                for line in self.shadowlines:
                    shadout.write(line + '\n')
                for line in self.sourcedata:
                    name, _ = line.split(':', 1)
                    if name[0] in ('+', '#', '@'):
                        # compat markers get no shadow entry
                        continue
                    if name in self.shadowednames:
                        continue
                    # locked-password placeholder for a new account
                    shadout.write(name + ':!:::::::\n')
        if outfile == '/etc/group':
            if self.shadowlines is None:
                self.read_shadow('/etc/gshadow')
            with open('/etc/gshadow', 'w') as shadout:
                for line in self.shadowlines:
                    shadout.write(line + '\n')
                for line in self.sourcedata:
                    name, _ = line.split(':' , 1)
                    if name in self.shadowednames:
                        continue
                    shadout.write(name + ':!::\n')
|
||||
|
||||
def appendonce(basepath, filename):
    """Append the contents of *filename* to its counterpart outside
    *basepath*, unless that exact content already appears there.

    The target path is *filename* with the *basepath* prefix removed.
    A missing target counts as empty, so the content is simply appended.
    """
    targname = filename.replace(basepath, '')
    with open(filename, 'rb') as srchdl:
        thedata = srchdl.read()
    try:
        with open(targname, 'rb') as curhdl:
            existing = curhdl.read()
    except IOError:
        existing = b''
    # Idempotence check: only append when the payload is not yet present.
    if thedata not in existing:
        with open(targname, 'ab') as targhdl:
            targhdl.write(thedata)
|
||||
|
||||
def synchronize():
    """Fetch staged sync files from the deployment server and apply them.

    Requests /confluent-api/self/remotesyncfiles with two scratch
    directories; when the server answers 202 it polls until 204, then
    merges the staged passwd/group/hosts data into the local files,
    applies append-once fragments, and finally applies ownership and
    permission options from the server's last 200 response.
    """
    tmpdir = tempfile.mkdtemp()        # staging area for merge-style files
    appendoncedir = tempfile.mkdtemp() # staging area for append-once files
    try:
        ac = apiclient.HTTPSClient()
        # Collect this node's usable addresses (interfaces that are UP,
        # excluding link-local v6 and v4 ranges) to report to the server.
        myips = []
        ipaddrs = subprocess.check_output(['ip', '-br', 'a']).split(b'\n')
        for line in ipaddrs:
            isa = line.split()
            if len(isa) < 3 or isa[1] != b'UP':
                continue
            for addr in isa[2:]:
                if addr.startswith(b'fe80::') or addr.startswith(b'169.254'):
                    continue
                addr = addr.split(b'/')[0]  # drop the prefix length
                if not isinstance(addr, str):
                    addr = addr.decode('utf8')
                myips.append(addr)
        data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
        status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
        if status == 202:
            # Server accepted; poll until it signals completion (204),
            # remembering the last 200 payload (file option metadata).
            lastrsp = ''
            while status != 204:
                status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
                if not isinstance(rsp, str):
                    rsp = rsp.decode('utf8')
                if status == 200:
                    lastrsp = rsp
            # Merge staged passwd entries into /etc/passwd (and shadow).
            pendpasswd = os.path.join(tmpdir, 'etc/passwd')
            if os.path.exists(pendpasswd):
                cm = CredMerger()
                cm.read_passwd(pendpasswd, targfile=False)
                cm.read_passwd('/etc/passwd', targfile=True)
                cm.write_out('/etc/passwd')
            # Merge staged group entries into /etc/group (and gshadow).
            pendgroup = os.path.join(tmpdir, 'etc/group')
            if os.path.exists(pendgroup):
                cm = CredMerger()
                cm.read_group(pendgroup, targfile=False)
                cm.read_group('/etc/group', targfile=True)
                cm.write_out('/etc/group')
            # Merge staged hosts entries into /etc/hosts.
            pendhosts = os.path.join(tmpdir, 'etc/hosts')
            if os.path.exists(pendhosts):
                cm = HostMerger()
                cm.read_source(pendhosts)
                cm.read_target('/etc/hosts')
                cm.write_out('/etc/hosts')
            # Apply every staged append-once fragment to its target file.
            for dirn in os.walk(appendoncedir):
                for filen in dirn[2]:
                    appendonce(appendoncedir, os.path.join(dirn[0], filen))
            # The final 200 response may carry per-file owner/group/
            # permissions options to apply to the synced files.
            if lastrsp:
                lastrsp = json.loads(lastrsp)
                opts = lastrsp.get('options', {})
                for fname in opts:
                    uid = -1
                    gid = -1
                    for opt in opts[fname]:
                        if opt == 'owner':
                            try:
                                # Prefer resolving by name; fall back to the
                                # numeric id the server supplied.
                                uid = pwd.getpwnam(opts[fname][opt]['name']).pw_uid
                            except KeyError:
                                uid = opts[fname][opt]['id']
                        elif opt == 'group':
                            try:
                                gid = grp.getgrnam(opts[fname][opt]['name']).gr_gid
                            except KeyError:
                                gid = opts[fname][opt]['id']
                        elif opt == 'permissions':
                            # permissions are transmitted as an octal string
                            os.chmod(fname, int(opts[fname][opt], 8))
                    if uid != -1 or gid != -1:
                        # -1 leaves the corresponding id unchanged (chown semantics)
                        os.chown(fname, uid, gid)
    finally:
        # Always remove the scratch directories, even on failure.
        shutil.rmtree(tmpdir)
        shutil.rmtree(appendoncedir)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Script entry point: pull and apply synced files from the deployment server.
    synchronize()
|
||||
@@ -74,8 +74,11 @@ if [ -e /sys/firmware/efi ]; then
|
||||
fi
|
||||
fi
|
||||
cat /target/etc/confluent/tls/*.pem > /target/etc/confluent/ca.pem
|
||||
cat /target/etc/confluent/tls/*.pem > /target/usr/local/share/ca-certificates/confluent.crt
|
||||
cat /target/etc/confluent/tls/*.pem > /etc/confluent/ca.pem
|
||||
chroot /target update-ca-certificates
|
||||
chroot /target bash -c "source /etc/confluent/functions; run_remote_python syncfileclient"
|
||||
chroot /target bash -c "source /etc/confluent/functions; run_remote_python confignet"
|
||||
chroot /target bash -c "source /etc/confluent/functions; run_remote_parts post.d"
|
||||
source /target/etc/confluent/functions
|
||||
|
||||
|
||||
1
confluent_osdeploy/ubuntu22.04-diskless
Symbolic link
1
confluent_osdeploy/ubuntu22.04-diskless
Symbolic link
@@ -0,0 +1 @@
|
||||
ubuntu20.04-diskless
|
||||
@@ -74,8 +74,11 @@ if [ -e /sys/firmware/efi ]; then
|
||||
fi
|
||||
fi
|
||||
cat /target/etc/confluent/tls/*.pem > /target/etc/confluent/ca.pem
|
||||
cat /target/etc/confluent/tls/*.pem > /target/usr/local/share/ca-certificates/confluent.crt
|
||||
cat /target/etc/confluent/tls/*.pem > /etc/confluent/ca.pem
|
||||
chroot /target update-ca-certificates
|
||||
chroot /target bash -c "source /etc/confluent/functions; run_remote_python syncfileclient"
|
||||
chroot /target bash -c "source /etc/confluent/functions; run_remote_python confignet"
|
||||
chroot /target bash -c "source /etc/confluent/functions; run_remote_parts post.d"
|
||||
source /target/etc/confluent/functions
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
import eventlet
|
||||
import eventlet.green.select as select
|
||||
import eventlet.green.subprocess as subprocess
|
||||
from fnmatch import fnmatch
|
||||
import glob
|
||||
import logging
|
||||
logging.getLogger('libarchive').addHandler(logging.NullHandler())
|
||||
@@ -153,6 +154,14 @@ def update_boot_esxi(profiledir, profile, label):
|
||||
'{0}/boot.img'.format(profiledir), profname], preexec_fn=relax_umask)
|
||||
|
||||
|
||||
def find_glob(loc, fileglob):
    """Walk the tree under *loc* and return the path of the first file
    whose basename matches the shell pattern *fileglob*, or None when no
    file matches."""
    for dirpath, _, filenames in os.walk(loc):
        matched = [fn for fn in filenames if fnmatch(fn, fileglob)]
        if matched:
            return os.path.join(dirpath, matched[0])
    return None
|
||||
|
||||
|
||||
def update_boot_linux(profiledir, profile, label):
|
||||
profname = os.path.basename(profiledir)
|
||||
kernelargs = profile.get('kernelargs', '')
|
||||
@@ -170,7 +179,11 @@ def update_boot_linux(profiledir, profile, label):
|
||||
for initramfs in initrds:
|
||||
grubcfg += " /initramfs/{0}".format(initramfs)
|
||||
grubcfg += "\n}\n"
|
||||
with open(profiledir + '/boot/efi/boot/grub.cfg', 'w') as grubout:
|
||||
# well need to honor grubprefix path if different
|
||||
grubcfgpath = find_glob(profiledir + '/boot', 'grub.cfg')
|
||||
if not grubcfgpath:
|
||||
grubcfgpath = profiledir + '/boot/efi/boot/grub.cfg'
|
||||
with open(grubcfgpath, 'w') as grubout:
|
||||
grubout.write(grubcfg)
|
||||
ipxeargs = kernelargs
|
||||
for initramfs in initrds:
|
||||
|
||||
@@ -34,7 +34,7 @@ mkdir -p opt/confluent/lib/imgutil
|
||||
mkdir -p opt/confluent/bin
|
||||
mv imgutil opt/confluent/bin/
|
||||
chmod a+x opt/confluent/bin/imgutil
|
||||
mv ubuntu suse15 el7 el9 el8 opt/confluent/lib/imgutil/
|
||||
mv ubuntu* suse15 el7 el9 el8 opt/confluent/lib/imgutil/
|
||||
mkdir -p opt/confluent/share/licenses/confluent_imgutil
|
||||
cp LICENSE opt/confluent/share/licenses/confluent_imgutil
|
||||
|
||||
|
||||
@@ -3,7 +3,13 @@ import configparser
|
||||
import ctypes
|
||||
import ctypes.util
|
||||
import datetime
|
||||
from distutils.dir_util import copy_tree
|
||||
import inspect
|
||||
from shutil import copytree as copytree
|
||||
if hasattr(inspect, 'getfullargspec') and 'dirs_exist_ok' in inspect.getfullargspec(copytree).args:
|
||||
def copy_tree(src, dst):
|
||||
copytree(src, dst, dirs_exist_ok=True)
|
||||
else:
|
||||
from distutils.dir_util import copy_tree
|
||||
import glob
|
||||
import json
|
||||
import argparse
|
||||
@@ -139,13 +145,30 @@ def capture_fs(args):
|
||||
masker.mask('/etc/ssh/*key')
|
||||
masker.mask('/etc/pki/tls/private/*')
|
||||
masker.mask('/root/.ssh/id_*')
|
||||
masker.mask('/etc/netplan/*.yaml')
|
||||
subprocess.check_call(['mksquashfs', '/run/imgutil/capin', fname + '.sfs', '-comp', 'xz'])
|
||||
|
||||
def capture_local_cleanup():
|
||||
shutil.rmtree('/usr/lib/dracut/modules.d/97confluent')
|
||||
try:
|
||||
shutil.rmtree('/usr/lib/dracut/modules.d/97confluent')
|
||||
except Exception:
|
||||
pass
|
||||
subprocess.check_call(['umount', '/run/imgutil/capout'])
|
||||
|
||||
def build_boot_tree(targpath):
|
||||
if glob.glob('/usr/lib/dracut/modules.d/97confluent/install*'):
|
||||
return build_el_boot_tree(targpath)
|
||||
elif glob.glob('/etc/initramfs-tools/'):
|
||||
return build_deb_boot_tree(targpath)
|
||||
|
||||
def build_deb_boot_tree(targpath):
|
||||
kver = os.uname().release
|
||||
mkdirp(os.path.join(targpath, 'boot/initramfs/'))
|
||||
subprocess.check_call(['mkinitramfs', '-o', os.path.join(targpath, 'boot/initramfs/distribution')])
|
||||
shutil.copy2('/boot/vmlinuz-{}'.format(kver), os.path.join(targpath, 'boot/kernel'))
|
||||
gather_bootloader(targpath)
|
||||
|
||||
def build_el_boot_tree(targpath):
|
||||
for dscript in glob.glob('/usr/lib/dracut/modules.d/97confluent/install*'):
|
||||
os.chmod(dscript, 0o755)
|
||||
kver = os.uname().release
|
||||
@@ -168,19 +191,26 @@ def capture_remote(args):
|
||||
# with here locally,
|
||||
# another that is remotely called to gather target profile info
|
||||
# and a third that is exclusive to pack_image for diskless mode
|
||||
utillib = __file__.replace('bin/imgutil', 'lib/imgutil')
|
||||
utillib = os.path.join(utillib, 'el8/dracut/')
|
||||
subprocess.check_call(['ssh', targ, 'mkdir', '-p', '/run/imgutil/capenv'])
|
||||
subprocess.check_call(['rsync', __file__, '{0}:/run/imgutil/capenv/'.format(targ)])
|
||||
finfo = subprocess.check_output(['ssh', targ, 'python3', '/run/imgutil/capenv/imgutil', 'getfingerprint']).decode('utf8')
|
||||
finfo = json.loads(finfo)
|
||||
if finfo['oscategory'] not in ('el8', 'el9'):
|
||||
if finfo['oscategory'] not in ('el8', 'el9', 'ubuntu20.04', 'ubuntu22.04'):
|
||||
raise Exception('Not yet supported for capture: ' + repr(finfo))
|
||||
oscat = finfo['oscategory']
|
||||
subprocess.check_call(['ssh', '-o', 'LogLevel=QUIET', '-t', targ, 'python3', '/run/imgutil/capenv/imgutil', 'capturelocal'])
|
||||
utillib = __file__.replace('bin/imgutil', 'lib/imgutil')
|
||||
utillib = os.path.join(utillib, '{}/dracut/'.format(oscat))
|
||||
subprocess.check_call(['rsync', '-a', utillib, '{0}:/usr/lib/dracut/modules.d/97confluent'.format(targ)])
|
||||
utillib = __file__.replace('bin/imgutil', 'lib/imgutil')
|
||||
if oscat.startswith('ubuntu'):
|
||||
utillib = os.path.join(utillib, '{}/initramfs-tools/'.format(oscat))
|
||||
if not os.path.exists(utillib):
|
||||
raise Exception('Not yet supported for capture: ' + repr(finfo))
|
||||
subprocess.check_call(['rsync', '-a', utillib, '{0}:/etc/initramfs-tools'.format(targ)])
|
||||
subprocess.check_call(['ssh', '-o', 'LogLevel=QUIET', '-t', targ, 'chmod', '+x', '/etc/initramfs-tools/hooks/confluent'])
|
||||
else:
|
||||
utillib = os.path.join(utillib, '{}/dracut/'.format(oscat))
|
||||
if not os.path.exists(utillib):
|
||||
raise Exception('Not yet supported for capture: ' + repr(finfo))
|
||||
subprocess.check_call(['rsync', '-a', utillib, '{0}:/usr/lib/dracut/modules.d/97confluent'.format(targ)])
|
||||
sys.stdout.write('Generating deployment initramfs...')
|
||||
sys.stdout.flush()
|
||||
subprocess.check_call(['ssh', '-o', 'LogLevel=QUIET', '-t', targ, 'python3', '/run/imgutil/capenv/imgutil', 'capturelocalboot'])
|
||||
@@ -1348,6 +1378,10 @@ def gather_bootloader(outdir, rootpath='/'):
|
||||
grubs = glob.glob(grubs)
|
||||
if len(grubs) == 1:
|
||||
grubbin = grubs[0]
|
||||
if 'ubuntu' in grubbin: # we needd to store a hint that this grub has a different hard coded prefix
|
||||
mkdirp(os.path.join(outdir, 'boot/EFI/ubuntu/'))
|
||||
with open(os.path.join(outdir, 'boot/EFI/ubuntu/grub.cfg'), 'w') as wo:
|
||||
wo.write('')
|
||||
shutil.copyfile(grubbin, os.path.join(outdir, 'boot/efi/boot/grubx64.efi'))
|
||||
shutil.copyfile(grubbin, os.path.join(outdir, 'boot/efi/boot/grub.efi'))
|
||||
|
||||
|
||||
1
imgutil/ubuntu20.04
Symbolic link
1
imgutil/ubuntu20.04
Symbolic link
@@ -0,0 +1 @@
|
||||
ubuntu
|
||||
1
imgutil/ubuntu22.04
Symbolic link
1
imgutil/ubuntu22.04
Symbolic link
@@ -0,0 +1 @@
|
||||
ubuntu
|
||||
Reference in New Issue
Block a user