2
0
mirror of https://github.com/xcat2/confluent.git synced 2026-04-24 17:51:29 +00:00

Merge branch 'lenovo:master' into master

This commit is contained in:
weragrzeda
2024-04-23 14:59:57 +02:00
committed by GitHub
104 changed files with 1823 additions and 362 deletions

View File

@@ -21,6 +21,7 @@
import optparse
import os
import re
import select
import sys
@@ -84,6 +85,7 @@ fullline = sys.stdin.readline()
printpending = True
clearpending = False
holdoff = 0
padded = None
while fullline:
for line in fullline.split('\n'):
if not line:
@@ -92,13 +94,18 @@ while fullline:
line = 'UNKNOWN: ' + line
if options.log:
node, output = line.split(':', 1)
output = output.lstrip()
if padded is None:
if output.startswith(' '):
padded = True
else:
padded = False
if padded:
output = re.sub(r'^ ', '', output)
currlog = options.log.format(node=node, nodename=node)
with open(currlog, mode='a') as log:
log.write(output + '\n')
continue
node, output = line.split(':', 1)
output = output.lstrip()
grouped.add_line(node, output)
if options.watch:
if not holdoff:

173
confluent_client/bin/l2traceroute Executable file
View File

@@ -0,0 +1,173 @@
#!/usr/libexec/platform-python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'tkucherera'
import optparse
import os
import signal
import sys
import subprocess
try:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
except AttributeError:
pass
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
if path.startswith('/opt'):
sys.path.append(path)
import confluent.client as client
# Command-line interface: two positional endpoints plus optional
# per-endpoint interface filters and a cumulus-only route filter.
argparser = optparse.OptionParser(
    usage="Usage: %prog <start_node> -i <interface> <end_node> -e <eface>",
)
argparser.add_option('-i', '--interface', type='str',
                     help='interface to check path against for the start node')
argparser.add_option('-e', '--eface', type='str',
                     help='interface to check path against for the end node')
argparser.add_option('-c', '--cumulus', action="store_true", dest="cumulus",
                     help='return layer 2 route through cumulus switches only')
(options, args) = argparser.parse_args()
try:
    start_node = args[0]
    end_node = args[1]
    interface = options.interface
    eface = options.eface
except IndexError:
    # Both positional endpoints are required; show usage and bail out.
    argparser.print_help()
    sys.exit(1)
session = client.Command()  # shared confluent API session used by all queries below
def get_neighbors(switch):
    """Return the confluent-known switches that are peers of *switch*.

    Only peers that appear in the module-level ``all_switches`` list are
    returned, so the search stays within switches confluent manages.
    """
    url = '/networking/neighbors/by-switch/{0}/by-peername/'.format(switch)
    neighbors = []
    for entry in session.read(url):
        peer = entry['item']['href'].strip('/')
        if peer in all_switches:
            neighbors.append(peer)
    return neighbors
def find_path(start, end, path=None):
    """Depth-first search for a layer 2 path from *start* to *end*.

    Returns the ordered list of switches traversed (including both
    endpoints), or None when the two switches are not connected.
    """
    # Avoid the mutable-default-argument pitfall: the original used
    # path=[], which is shared across calls; build a fresh list instead.
    path = ([] if path is None else path) + [start]
    if start == end:
        return path  # If start and end are the same, return the path
    for node in get_neighbors(start):
        if node not in path:  # never revisit a switch (avoids cycles)
            new_path = find_path(node, end, path)
            if new_path:
                return new_path  # If a path is found, return it
    return None  # If no path is found, return None
def is_cumulus(switch):
    """Return True if *switch* uses the 'affluent' hardware management
    method (how confluent manages Cumulus switches), False otherwise.

    Runs ``nodeattrib <switch> hardwaremanagement.method`` and scans the
    output; returns False when the command fails or no line matches.
    """
    try:
        read_attrib = subprocess.check_output(
            ['nodeattrib', switch, 'hardwaremanagement.method'])
    except subprocess.CalledProcessError:
        return False
    for attribs in read_attrib.decode('utf-8').split('\n'):
        attrib = attribs.split(':')
        # Expected shape: "<node>: hardwaremanagement.method: <value>".
        # The original returned False on the first non-matching line,
        # so later lines were never examined; scan them all instead.
        # Guarding on len > 2 also prevents an IndexError on 2-part lines.
        if len(attrib) > 2 and attrib[2].strip() == 'affluent':
            return True
    return False
def host_to_switch(node, interface=None):
    """Return the list of switches *node* is cabled to.

    Reads the node's ``net.<interface>.switch`` attributes (all
    interfaces unless *interface* narrows it to one).  When the
    ``--cumulus`` option was given, only switches managed via the
    'affluent' method are returned.  Returns False when nodeattrib
    fails; returns [node] when the node is itself a known switch.
    """
    # first check the the node config to see what switches are connected
    # if host is in rhel can use nmstate package
    if node in all_switches:
        return [node]
    switches = []
    netarg = 'net.*.switch'
    if interface:
        netarg = 'net.{0}.switch'.format(interface)
    try:
        read_attrib = subprocess.check_output(['nodeattrib', node, netarg])
    except subprocess.CalledProcessError:
        return False
    for attribs in read_attrib.decode('utf-8').split('\n'):
        attrib = attribs.split(':')
        try:
            # skip management-network attributes and empty values
            if ' net.mgt.switch' in attrib or attrib[2] == '':
                continue
        except IndexError:
            continue
        switch = attrib[2].strip()
        # Honor --cumulus: the original appended the switch in both
        # branches, making the filter a no-op; actually skip
        # non-cumulus switches when the flag is set.
        if options.cumulus and not is_cumulus(switch):
            continue
        switches.append(switch)
    return switches
def path_between_nodes(start_switches, end_switches):
    """Return a layer 2 path (list of switches) linking any start switch
    to any end switch, or the string 'No path found' when none connects.

    The original returned 'No path found' after probing only the first
    start/end pair; exhaust every combination before giving up.
    """
    for start_switch in start_switches:
        for end_switch in end_switches:
            if start_switch == end_switch:
                # Both endpoints hang off the same switch.
                return [start_switch]
            path = find_path(start_switch, end_switch)
            if path:
                return path
    return 'No path found'
# Discover every switch confluent knows about; bounds the path search.
all_switches = []
for res in session.read('/networking/neighbors/by-switch/'):
    if 'error' in res:
        sys.stderr.write(res['error'] + '\n')
        exitcode = 1
    else:
        switch = (res['item']['href'].replace('/', ''))
        all_switches.append(switch)

# Expand the end noderange argument into individual node names.
end_nodeslist = []
nodelist = '/noderange/{0}/nodes/'.format(end_node)
for res in session.read(nodelist):
    if 'error' in res:
        sys.stderr.write(res['error'] + '\n')
        exitcode = 1
    else:
        elem = (res['item']['href'].replace('/', ''))
        end_nodeslist.append(elem)

start_switches = host_to_switch(start_node, interface)
for end_node in end_nodeslist:
    if end_node:
        end_switches = host_to_switch(end_node, eface)
        if not end_switches:
            # Bug fix: the original printed the literal '{0}' because
            # .format() was never called on the message.
            print('Error: net.{0}.switch attribute is not valid'.format(
                eface if eface else '*'))
            continue
        path = path_between_nodes(start_switches, end_switches)
        print(f'{start_node} to {end_node}: {path}')
# TODO dont put switches that are connected through management interfaces.

View File

@@ -102,9 +102,9 @@ def run():
cmdv = ['ssh', sshnode] + cmdvbase + cmdstorun[0]
if currprocs < concurrentprocs:
currprocs += 1
run_cmdv(node, cmdv, all, pipedesc)
run_cmdv(sshnode, cmdv, all, pipedesc)
else:
pendingexecs.append((node, cmdv))
pendingexecs.append((sshnode, cmdv))
if not all or exitcode:
sys.exit(exitcode)
rdy, _, _ = select.select(all, [], [], 10)

View File

@@ -126,13 +126,14 @@ elif options.set:
argset = argset.strip()
if argset:
arglist += shlex.split(argset)
argset = argfile.readline()
argset = argfile.readline()
session.stop_if_noderange_over(noderange, options.maxnodes)
exitcode=client.updateattrib(session,arglist,nodetype, noderange, options, None)
if exitcode != 0:
sys.exit(exitcode)
# Lists all attributes
if len(args) > 0:
# setting output to all so it can search since if we do have something to search, we want to show all outputs even if it is blank.
if requestargs is None:

View File

@@ -90,17 +90,6 @@ def main(args):
if 'error' in rsp:
sys.stderr.write(rsp['error'] + '\n')
sys.exit(1)
if not args.clear and args.network and not args.prepareonly:
rc = c.simple_noderange_command(args.noderange, '/boot/nextdevice', 'network',
bootmode='uefi',
persistent=False,
errnodes=errnodes)
if errnodes:
sys.stderr.write(
'Unable to set boot device for following nodes: {0}\n'.format(
','.join(errnodes)))
return 1
rc |= c.simple_noderange_command(args.noderange, '/power/state', 'boot')
if args.clear:
cleararm(args.noderange, c)
clearpending(args.noderange, c)
@@ -120,7 +109,7 @@ def main(args):
for profname in profnames:
sys.stderr.write(' ' + profname + '\n')
else:
sys.stderr.write('No deployment profiles available, try osdeploy fiimport or imgutil capture\n')
sys.stderr.write('No deployment profiles available, try osdeploy import or imgutil capture\n')
sys.exit(1)
armonce(args.noderange, c)
setpending(args.noderange, args.profile, c)
@@ -166,6 +155,17 @@ def main(args):
else:
print('{0}: {1}{2}'.format(node, profile, armed))
sys.exit(0)
if not args.clear and args.network and not args.prepareonly:
rc = c.simple_noderange_command(args.noderange, '/boot/nextdevice', 'network',
bootmode='uefi',
persistent=False,
errnodes=errnodes)
if errnodes:
sys.stderr.write(
'Unable to set boot device for following nodes: {0}\n'.format(
','.join(errnodes)))
return 1
rc |= c.simple_noderange_command(args.noderange, '/power/state', 'boot')
if args.network and not args.prepareonly:
return rc
return 0

View File

@@ -68,7 +68,7 @@ def main():
else:
elem=(res['item']['href'].replace('/', ''))
list.append(elem)
print(options.delim.join(list))
print(options.delim.join(list))
sys.exit(exitcode)

View File

@@ -668,6 +668,9 @@ def updateattrib(session, updateargs, nodetype, noderange, options, dictassign=N
for attrib in updateargs[1:]:
keydata[attrib] = None
for res in session.update(targpath, keydata):
for node in res.get('databynode', {}):
for warnmsg in res['databynode'][node].get('_warnings', []):
sys.stderr.write('Warning: ' + warnmsg + '\n')
if 'error' in res:
if 'errorcode' in res:
exitcode = res['errorcode']

View File

@@ -98,17 +98,24 @@ class GroupedData(object):
self.byoutput = {}
self.header = {}
self.client = confluentconnection
self.detectedpad = None
def generate_byoutput(self):
self.byoutput = {}
thepad = self.detectedpad if self.detectedpad else ''
for n in self.bynode:
output = '\n'.join(self.bynode[n])
output = ''
for ln in self.bynode[n]:
output += ln.replace(thepad, '', 1) + '\n'
if output not in self.byoutput:
self.byoutput[output] = set([n])
else:
self.byoutput[output].add(n)
def add_line(self, node, line):
wspc = re.search(r'^\s*', line).group()
if self.detectedpad is None or len(wspc) < len(self.detectedpad):
self.detectedpad = wspc
if node not in self.bynode:
self.bynode[node] = [line]
else:
@@ -219,4 +226,4 @@ if __name__ == '__main__':
if not line:
continue
groupoutput.add_line(*line.split(': ', 1))
groupoutput.print_deviants()
groupoutput.print_deviants()

View File

@@ -153,11 +153,11 @@ _confluent_osimage_completion()
{
_confluent_get_args
if [ $NUMARGS == 2 ]; then
COMPREPLY=($(compgen -W "initialize import updateboot rebase" -- ${COMP_WORDS[COMP_CWORD]}))
COMPREPLY=($(compgen -W "initialize import importcheck updateboot rebase" -- ${COMP_WORDS[COMP_CWORD]}))
return
elif [ ${CMPARGS[1]} == 'initialize' ]; then
COMPREPLY=($(compgen -W "-h -u -s -t -i" -- ${COMP_WORDS[COMP_CWORD]}))
elif [ ${CMPARGS[1]} == 'import' ]; then
elif [ ${CMPARGS[1]} == 'import' ] || [ ${CMPARGS[1]} == 'importcheck' ]; then
compopt -o default
COMPREPLY=()
return

View File

@@ -0,0 +1,38 @@
l2traceroute(8) -- returns the layer 2 route through an Ethernet network managed by confluent given 2 end points.
==============================
## SYNOPSIS
`l2traceroute [options] <start_node> <end_noderange>`
## DESCRIPTION
**l2traceroute** is a command that returns the layer 2 route for the configured interfaces in nodeattrib.
It can also be used with the -i and -e options to check against specific interfaces on the endpoints.
## PREREQUISITES
**l2traceroute** requires that the net.<interface>.switch attributes be set on each endpoint, unless that endpoint is itself a switch.
## OPTIONS
* ` -e` EFACE, --eface=INTERFACE
interface to check against for the second end point
* ` -i` INTERFACE, --interface=INTERFACE
interface to check against for the first end point
* ` -c`, `--cumulus`
return layer 2 route through cumulus switches only
* `-h`, `--help`:
Show help message and exit
## EXAMPLES
* Checking route between two nodes:
`# l2traceroute_client n244 n1851`
`n244 to n1851: ['switch114']`
* Checking route from one node to multiple nodes:
`# l2traceroute_client n244 n1833,n1851`
`n244 to n1833: ['switch114', 'switch7', 'switch32', 'switch253', 'switch85', 'switch72', 'switch21', 'switch2', 'switch96', 'switch103', 'switch115']
n244 to n1851: ['switch114']`

View File

@@ -151,13 +151,14 @@ class NetplanManager(object):
needcfgapply = False
for devname in devnames:
needcfgwrite = False
if stgs['ipv6_method'] == 'static':
# ipv6_method missing at uconn...
if stgs.get('ipv6_method', None) == 'static':
curraddr = stgs['ipv6_address']
currips = self.getcfgarrpath([devname, 'addresses'])
if curraddr not in currips:
needcfgwrite = True
currips.append(curraddr)
if stgs['ipv4_method'] == 'static':
if stgs.get('ipv4_method', None) == 'static':
curraddr = stgs['ipv4_address']
currips = self.getcfgarrpath([devname, 'addresses'])
if curraddr not in currips:
@@ -180,7 +181,7 @@ class NetplanManager(object):
if dnsips:
currdnsips = self.getcfgarrpath([devname, 'nameservers', 'addresses'])
for dnsip in dnsips:
if dnsip not in currdnsips:
if dnsip and dnsip not in currdnsips:
needcfgwrite = True
currdnsips.append(dnsip)
if dnsdomain:
@@ -294,7 +295,8 @@ class WickedManager(object):
class NetworkManager(object):
def __init__(self, devtypes):
def __init__(self, devtypes, deploycfg):
self.deploycfg = deploycfg
self.connections = {}
self.uuidbyname = {}
self.uuidbydev = {}
@@ -344,7 +346,7 @@ class NetworkManager(object):
bondcfg[stg] = deats[stg]
if member in self.uuidbyname:
subprocess.check_call(['nmcli', 'c', 'del', self.uuidbyname[member]])
subprocess.check_call(['nmcli', 'c', 'add', 'type', 'team-slave', 'master', team, 'con-name', member, 'connection.interface-name', member])
subprocess.check_call(['nmcli', 'c', 'add', 'type', 'bond-slave', 'master', team, 'con-name', member, 'connection.interface-name', member])
if bondcfg:
args = []
for parm in bondcfg:
@@ -366,6 +368,20 @@ class NetworkManager(object):
cmdargs['ipv4.gateway'] = stgs['ipv4_gateway']
if stgs.get('ipv6_gateway', None):
cmdargs['ipv6.gateway'] = stgs['ipv6_gateway']
dnsips = self.deploycfg.get('nameservers', [])
if not dnsips:
dnsips = []
dns4 = []
dns6 = []
for dnsip in dnsips:
if '.' in dnsip:
dns4.append(dnsip)
elif ':' in dnsip:
dns6.append(dnsip)
if dns4:
cmdargs['ipv4.dns'] = ','.join(dns4)
if dns6:
cmdargs['ipv6.dns'] = ','.join(dns6)
if len(cfg['interfaces']) > 1: # team time.. should be..
if not cfg['settings'].get('team_mode', None):
sys.stderr.write("Warning, multiple interfaces ({0}) without a team_mode, skipping setup\n".format(','.join(cfg['interfaces'])))
@@ -378,7 +394,9 @@ class NetworkManager(object):
for arg in cmdargs:
cargs.append(arg)
cargs.append(cmdargs[arg])
subprocess.check_call(['nmcli', 'c', 'add', 'type', 'team', 'con-name', cname, 'connection.interface-name', cname, 'team.runner', stgs['team_mode']] + cargs)
if stgs['team_mode'] == 'lacp':
stgs['team_mode'] = '802.3ad'
subprocess.check_call(['nmcli', 'c', 'add', 'type', 'bond', 'con-name', cname, 'connection.interface-name', cname, 'bond.options', 'mode={}'.format(stgs['team_mode'])] + cargs)
for iface in cfg['interfaces']:
self.add_team_member(cname, iface)
subprocess.check_call(['nmcli', 'c', 'u', cname])
@@ -484,7 +502,7 @@ if __name__ == '__main__':
if os.path.exists('/usr/sbin/netplan'):
nm = NetplanManager(dc)
if os.path.exists('/usr/bin/nmcli'):
nm = NetworkManager(devtypes)
nm = NetworkManager(devtypes, dc)
elif os.path.exists('/usr/sbin/wicked'):
nm = WickedManager()
for netn in netname_to_interfaces:

View File

@@ -0,0 +1,49 @@
is_suse=false
is_rhel=false
# Locate and regenerate the grub config for the detected distro.
if test -f /boot/efi/EFI/redhat/grub.cfg; then
    grubcfg="/boot/efi/EFI/redhat/grub.cfg"
    grub2-mkconfig -o $grubcfg
    is_rhel=true
elif test -f /boot/efi/EFI/sle_hpc/grub.cfg; then
    grubcfg="/boot/efi/EFI/sle_hpc/grub.cfg"
    grub2-mkconfig -o $grubcfg
    is_suse=true
else
    echo "Expected File missing: Check if os sle_hpc or redhat"
    exit
fi
# working on SUSE: comment out every non-comment line inside the
# grub_platform block (serial console setup), then the terminal lines.
if $is_suse; then
    start=false
    num_line=0
    lines_to_edit=()
    while read line; do
        ((num_line++))
        if [[ $line == *"grub_platform"* ]]; then
            start=true
        fi
        if $start; then
            if [[ $line != "#"* ]]; then
                lines_to_edit+=($num_line)
            fi
        fi
        # a bare two-character "fi" closes the grub_platform block
        if [[ ${#line} -eq 2 && $line == *"fi" ]]; then
            if $start; then
                start=false
            fi
        fi
    # Bug fix: the loop read a literal local file 'grub_cnf.cfg', but the
    # collected line numbers are applied to $grubcfg below; both must
    # operate on the same file.
    done < $grubcfg
    for line_num in "${lines_to_edit[@]}"; do
        line_num+="s"
        sed -i "${line_num},^,#," $grubcfg
    done
    sed -i 's,^terminal,#terminal,' $grubcfg
fi
# Working on Redhat: just comment out serial/terminal directives.
if $is_rhel; then
    sed -i 's,^serial,#serial, ; s,^terminal,#terminal,' $grubcfg
fi

View File

@@ -26,7 +26,7 @@ mkdir -p opt/confluent/bin
mkdir -p stateless-bin
cp -a el8bin/* .
ln -s el8 el9
for os in rhvh4 el7 genesis el8 suse15 ubuntu20.04 ubuntu22.04 coreos el9; do
for os in rhvh4 el7 genesis el8 suse15 ubuntu20.04 ubuntu22.04 ubuntu24.04 coreos el9; do
mkdir ${os}out
cd ${os}out
if [ -d ../${os}bin ]; then
@@ -76,7 +76,7 @@ cp -a esxi7 esxi8
%install
mkdir -p %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/
#cp LICENSE %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/
for os in rhvh4 el7 el8 el9 genesis suse15 ubuntu20.04 ubuntu22.04 esxi6 esxi7 esxi8 coreos; do
for os in rhvh4 el7 el8 el9 genesis suse15 ubuntu20.04 ubuntu22.04 ubuntu24.04 esxi6 esxi7 esxi8 coreos; do
mkdir -p %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs/aarch64/
cp ${os}out/addons.* %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs/aarch64/
if [ -d ${os}disklessout ]; then

View File

@@ -28,7 +28,7 @@ This contains support utilities for enabling deployment of x86_64 architecture s
#cp start_root urlmount ../stateless-bin/
#cd ..
ln -s el8 el9
for os in rhvh4 el7 genesis el8 suse15 ubuntu18.04 ubuntu20.04 ubuntu22.04 coreos el9; do
for os in rhvh4 el7 genesis el8 suse15 ubuntu18.04 ubuntu20.04 ubuntu22.04 ubuntu24.04 coreos el9; do
mkdir ${os}out
cd ${os}out
if [ -d ../${os}bin ]; then
@@ -42,7 +42,7 @@ for os in rhvh4 el7 genesis el8 suse15 ubuntu18.04 ubuntu20.04 ubuntu22.04 coreo
mv ../addons.cpio .
cd ..
done
for os in el7 el8 suse15 el9 ubuntu20.04 ubuntu22.04; do
for os in el7 el8 suse15 el9 ubuntu20.04 ubuntu22.04 ubuntu24.04; do
mkdir ${os}disklessout
cd ${os}disklessout
if [ -d ../${os}bin ]; then
@@ -78,7 +78,7 @@ cp -a esxi7 esxi8
%install
mkdir -p %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/
cp LICENSE %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/
for os in rhvh4 el7 el8 el9 genesis suse15 ubuntu20.04 ubuntu18.04 ubuntu22.04 esxi6 esxi7 esxi8 coreos; do
for os in rhvh4 el7 el8 el9 genesis suse15 ubuntu20.04 ubuntu18.04 ubuntu22.04 ubuntu24.04 esxi6 esxi7 esxi8 coreos; do
mkdir -p %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs
mkdir -p %{buildroot}/opt/confluent/lib/osdeploy/$os/profiles
cp ${os}out/addons.* %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -1,4 +1,5 @@
#!/usr/bin/python
import time
import importlib
import tempfile
import json
@@ -223,6 +224,7 @@ def synchronize():
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(2)
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -5,6 +5,7 @@ import json
import os
import shutil
import pwd
import time
import grp
try:
from importlib.machinery import SourceFileLoader
@@ -223,6 +224,7 @@ def synchronize():
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(2)
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')

View File

@@ -155,7 +155,7 @@ fi
ready=0
while [ $ready = "0" ]; do
get_remote_apikey
if [[ $confluent_mgr == *:* ]]; then
if [[ $confluent_mgr == *:* ]] && [[ $confluent_mgr != "["* ]]; then
confluent_mgr="[$confluent_mgr]"
fi
tmperr=$(mktemp)
@@ -189,7 +189,7 @@ cat > /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
EOC
echo id=${ifname} >> /run/NetworkManager/system-connections/$ifname.nmconnection
echo uuid=$(uuidgen) >> /run/NetworkManager/system-connections/$ifname.nmconnection
linktype=$(ip link |grep -A2 ${ifname}|tail -n 1|awk '{print $1}')
linktype=$(ip link show dev ${ifname}|grep link/|awk '{print $1}')
if [ "$linktype" = link/infiniband ]; then
linktype="infiniband"
else
@@ -324,7 +324,7 @@ fi
echo '[proxy]' >> /run/NetworkManager/system-connections/$ifname.nmconnection
chmod 600 /run/NetworkManager/system-connections/*.nmconnection
confluent_websrv=$confluent_mgr
if [[ $confluent_websrv == *:* ]]; then
if [[ $confluent_websrv == *:* ]] && [[ $confluent_websrv != "["* ]]; then
confluent_websrv="[$confluent_websrv]"
fi
echo -n "Initializing ssh..."

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -10,6 +10,7 @@ import stat
import struct
import sys
import subprocess
import traceback
bootuuid = None
@@ -426,4 +427,9 @@ def install_to_disk(imgpath):
if __name__ == '__main__':
install_to_disk(os.environ['mountsrc'])
try:
install_to_disk(os.environ['mountsrc'])
except Exception:
traceback.print_exc()
time.sleep(86400)
raise

View File

@@ -1,6 +1,6 @@
. /lib/dracut-lib.sh
confluent_whost=$confluent_mgr
if [[ "$confluent_whost" == *:* ]]; then
if [[ "$confluent_whost" == *:* ]] && [[ "$confluent_whost" != "["* ]]; then
confluent_whost="[$confluent_mgr]"
fi
mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay

View File

@@ -16,6 +16,7 @@ if [ -z "$confluent_mgr" ]; then
fi
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
timedatectl set-timezone $(grep ^timezone: /etc/confluent/confluent.deploycfg|awk '{print $2}')
hostnamectl set-hostname $nodename
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
mkdir -p /var/log/confluent

View File

@@ -1,4 +1,6 @@
#!/usr/bin/python3
import random
import time
import subprocess
import importlib
import tempfile
@@ -7,6 +9,7 @@ import os
import shutil
import pwd
import grp
import sys
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
@@ -227,9 +230,16 @@ def synchronize():
myips.append(addr)
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status >= 300:
sys.stderr.write("Error starting syncfiles - {}:\n".format(status))
sys.stderr.write(rsp.decode('utf8'))
sys.stderr.write('\n')
sys.stderr.flush()
return status
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(1+(2*random.random()))
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
@@ -277,10 +287,21 @@ def synchronize():
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
return status
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()
status = 202
while status not in (204, 200):
try:
status = synchronize()
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
status = 300
if status not in (204, 200):
time.sleep((random.random()*3)+2)

View File

@@ -227,12 +227,18 @@ if [ "$textconsole" = "true" ] && ! grep console= /proc/cmdline > /dev/null; the
fi
fi
echo inst.repo=$proto://$mgr/confluent-public/os/$profilename/distribution >> /etc/cmdline.d/01-confluent.conf
. /etc/os-release
ISOSRC=$(blkid -t TYPE=iso9660|grep -Ei ' LABEL="'$ID-$VERSION_ID|sed -e s/:.*//)
if [ -z "$ISOSRC" ]; then
echo inst.repo=$proto://$mgr/confluent-public/os/$profilename/distribution >> /etc/cmdline.d/01-confluent.conf
root=anaconda-net:$proto://$mgr/confluent-public/os/$profilename/distribution
export root
else
echo inst.repo=cdrom:$ISOSRC >> /etc/cmdline.d/01-confluent.conf
fi
echo inst.ks=$proto://$mgr/confluent-public/os/$profilename/kickstart >> /etc/cmdline.d/01-confluent.conf
kickstart=$proto://$mgr/confluent-public/os/$profilename/kickstart
root=anaconda-net:$proto://$mgr/confluent-public/os/$profilename/distribution
export kickstart
export root
autoconfigmethod=$(grep ipv4_method /etc/confluent/confluent.deploycfg)
autoconfigmethod=${autoconfigmethod#ipv4_method: }
if [ "$autoconfigmethod" = "dhcp" ]; then

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -1,8 +1,12 @@
#!/bin/sh
grep HostCert /etc/ssh/sshd_config.anaconda >> /mnt/sysimage/etc/ssh/sshd_config
echo HostbasedAuthentication yes >> /mnt/sysimage/etc/ssh/sshd_config
echo HostbasedUsesNameFromPacketOnly yes >> /mnt/sysimage/etc/ssh/sshd_config
echo IgnoreRhosts no >> /mnt/sysimage/etc/ssh/sshd_config
targssh=/mnt/sysimage/etc/ssh/sshd_config
if [ -d /mnt/sysimage/etc/ssh/sshd_config.d/ ]; then
targssh=/mnt/sysimage/etc/ssh/sshd_config.d/90-confluent.conf
fi
grep HostCert /etc/ssh/sshd_config.anaconda >> $targssh
echo HostbasedAuthentication yes >> $targssh
echo HostbasedUsesNameFromPacketOnly yes >> $targssh
echo IgnoreRhosts no >> $targssh
sshconf=/mnt/sysimage/etc/ssh/ssh_config
if [ -d /mnt/sysimage/etc/ssh/ssh_config.d/ ]; then
sshconf=/mnt/sysimage/etc/ssh/ssh_config.d/01-confluent.conf

View File

@@ -1,4 +1,6 @@
#!/usr/bin/python3
import random
import time
import subprocess
import importlib
import tempfile
@@ -7,6 +9,7 @@ import os
import shutil
import pwd
import grp
import sys
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
@@ -227,9 +230,16 @@ def synchronize():
myips.append(addr)
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status >= 300:
sys.stderr.write("Error starting syncfiles - {}:\n".format(status))
sys.stderr.write(rsp.decode('utf8'))
sys.stderr.write('\n')
sys.stderr.flush()
return status
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(1+(2*random.random()))
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
@@ -277,10 +287,21 @@ def synchronize():
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
return status
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()
status = 202
while status not in (204, 200):
try:
status = synchronize()
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
status = 300
if status not in (204, 200):
time.sleep((random.random()*3)+2)

View File

@@ -120,7 +120,7 @@ fi
ready=0
while [ $ready = "0" ]; do
get_remote_apikey
if [[ $confluent_mgr == *:* ]]; then
if [[ $confluent_mgr == *:* ]] && [[ $confluent_mgr != "["* ]]; then
confluent_mgr="[$confluent_mgr]"
fi
tmperr=$(mktemp)
@@ -154,7 +154,7 @@ cat > /run/NetworkManager/system-connections/$ifname.nmconnection << EOC
EOC
echo id=${ifname} >> /run/NetworkManager/system-connections/$ifname.nmconnection
echo uuid=$(uuidgen) >> /run/NetworkManager/system-connections/$ifname.nmconnection
linktype=$(ip link |grep -A2 ${ifname}|tail -n 1|awk '{print $1}')
linktype=$(ip link show dev ${ifname}|grep link/|awk '{print $1}')
if [ "$linktype" = link/infiniband ]; then
linktype="infiniband"
else
@@ -281,7 +281,7 @@ fi
echo '[proxy]' >> /run/NetworkManager/system-connections/$ifname.nmconnection
chmod 600 /run/NetworkManager/system-connections/*.nmconnection
confluent_websrv=$confluent_mgr
if [[ $confluent_websrv == *:* ]]; then
if [[ $confluent_websrv == *:* ]] && [[ $confluent_websrv != "["* ]]; then
confluent_websrv="[$confluent_websrv]"
fi
echo -n "Initializing ssh..."

View File

@@ -9,9 +9,16 @@ HOME=$(getent passwd $(whoami)|cut -d: -f 6)
export HOME
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg|awk '{print $2}')
if [ -z "$confluent_mgr" ] || [ "$confluent_mgr" == "null" ] || ! ping -c 1 $confluent_mgr >& /dev/null; then
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
fi
confluent_websrv=$confluent_mgr
if [[ "$confluent_mgr" == *:* ]]; then
confluent_websrv="[$confluent_mgr]"
fi
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
export nodename confluent_mgr confluent_profile confluent_websrv
. /etc/confluent/functions
(
exec >> /var/log/confluent/confluent-firstboot.log
@@ -34,7 +41,7 @@ if [ ! -f /etc/confluent/firstboot.ran ]; then
run_remote_config firstboot.d
fi
curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus
curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_websrv/confluent-api/self/updatestatus
systemctl disable firstboot
rm /etc/systemd/system/firstboot.service
rm /etc/confluent/firstboot.ran

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -10,8 +10,16 @@ import stat
import struct
import sys
import subprocess
import traceback
bootuuid = None
vgname = 'localstorage'
oldvgname = None
def convert_lv(oldlvname):
if oldvgname is None:
return None
return oldlvname.replace(oldvgname, vgname)
def get_partname(devname, idx):
if devname[-1] in '0123456789':
@@ -53,6 +61,8 @@ def get_image_metadata(imgpath):
header = img.read(16)
if header == b'\x63\x7b\x9d\x26\xb7\xfd\x48\x30\x89\xf9\x11\xcf\x18\xfd\xff\xa1':
for md in get_multipart_image_meta(img):
if md.get('device', '').startswith('/dev/zram'):
continue
yield md
else:
raise Exception('Installation from single part image not supported')
@@ -86,14 +96,14 @@ def fixup(rootdir, vols):
if tab.startswith('#ORIGFSTAB#'):
if entry[1] in devbymount:
targetdev = devbymount[entry[1]]
if targetdev.startswith('/dev/localstorage/'):
if targetdev.startswith('/dev/{}/'.format(vgname)):
entry[0] = targetdev
else:
uuid = subprocess.check_output(['blkid', '-s', 'UUID', '-o', 'value', targetdev]).decode('utf8')
uuid = uuid.strip()
entry[0] = 'UUID={}'.format(uuid)
elif entry[2] == 'swap':
entry[0] = '/dev/mapper/localstorage-swap'
entry[0] = '/dev/mapper/{}-swap'.format(vgname.replace('-', '--'))
entry[0] = entry[0].ljust(42)
entry[1] = entry[1].ljust(16)
entry[3] = entry[3].ljust(28)
@@ -141,6 +151,46 @@ def fixup(rootdir, vols):
grubsyscfg = os.path.join(rootdir, 'etc/sysconfig/grub')
if not os.path.exists(grubsyscfg):
grubsyscfg = os.path.join(rootdir, 'etc/default/grub')
kcmdline = os.path.join(rootdir, 'etc/kernel/cmdline')
if os.path.exists(kcmdline):
with open(kcmdline) as kcmdlinein:
kcmdlinecontent = kcmdlinein.read()
newkcmdlineent = []
for ent in kcmdlinecontent.split():
if ent.startswith('resume='):
newkcmdlineent.append('resume={}'.format(newswapdev))
elif ent.startswith('root='):
newkcmdlineent.append('root={}'.format(newrootdev))
elif ent.startswith('rd.lvm.lv='):
ent = convert_lv(ent)
if ent:
newkcmdlineent.append(ent)
else:
newkcmdlineent.append(ent)
with open(kcmdline, 'w') as kcmdlineout:
kcmdlineout.write(' '.join(newkcmdlineent) + '\n')
for loadent in glob.glob(os.path.join(rootdir, 'boot/loader/entries/*.conf')):
with open(loadent) as loadentin:
currentry = loadentin.read().split('\n')
with open(loadent, 'w') as loadentout:
for cfgline in currentry:
cfgparts = cfgline.split()
if not cfgparts or cfgparts[0] != 'options':
loadentout.write(cfgline + '\n')
continue
newcfgparts = [cfgparts[0]]
for cfgpart in cfgparts[1:]:
if cfgpart.startswith('root='):
newcfgparts.append('root={}'.format(newrootdev))
elif cfgpart.startswith('resume='):
newcfgparts.append('resume={}'.format(newswapdev))
elif cfgpart.startswith('rd.lvm.lv='):
cfgpart = convert_lv(cfgpart)
if cfgpart:
newcfgparts.append(cfgpart)
else:
newcfgparts.append(cfgpart)
loadentout.write(' '.join(newcfgparts) + '\n')
with open(grubsyscfg) as defgrubin:
defgrub = defgrubin.read().split('\n')
with open(grubsyscfg, 'w') as defgrubout:
@@ -148,9 +198,18 @@ def fixup(rootdir, vols):
gline = gline.split()
newline = []
for ent in gline:
if ent.startswith('resume=') or ent.startswith('rd.lvm.lv'):
continue
newline.append(ent)
if ent.startswith('resume='):
newline.append('resume={}'.format(newswapdev))
elif ent.startswith('root='):
newline.append('root={}'.format(newrootdev))
elif ent.startswith('rd.lvm.lv='):
ent = convert_lv(ent)
if ent:
newline.append(ent)
elif '""' in ent:
newline.append('""')
else:
newline.append(ent)
defgrubout.write(' '.join(newline) + '\n')
grubcfg = subprocess.check_output(['find', os.path.join(rootdir, 'boot'), '-name', 'grub.cfg']).decode('utf8').strip().replace(rootdir, '/').replace('//', '/')
grubcfg = grubcfg.split('\n')
@@ -227,8 +286,14 @@ def had_swap():
return True
return False
newrootdev = None
newswapdev = None
def install_to_disk(imgpath):
global bootuuid
global newrootdev
global newswapdev
global vgname
global oldvgname
lvmvols = {}
deftotsize = 0
mintotsize = 0
@@ -260,6 +325,13 @@ def install_to_disk(imgpath):
biggestfs = fs
biggestsize = fs['initsize']
if fs['device'].startswith('/dev/mapper'):
oldvgname = fs['device'].rsplit('/', 1)[-1]
# if node has - then /dev/mapper will double up the hypen
if '_' in oldvgname and '-' in oldvgname.split('_')[-1]:
oldvgname = oldvgname.rsplit('-', 1)[0].replace('--', '-')
osname = oldvgname.split('_')[0]
nodename = socket.gethostname().split('.')[0]
vgname = '{}_{}'.format(osname, nodename)
lvmvols[fs['device'].replace('/dev/mapper/', '')] = fs
deflvmsize += fs['initsize']
minlvmsize += fs['minsize']
@@ -304,6 +376,8 @@ def install_to_disk(imgpath):
end = sectors
parted.run('mkpart primary {}s {}s'.format(curroffset, end))
vol['targetdisk'] = get_partname(instdisk, volidx)
if vol['mount'] == '/':
newrootdev = vol['targetdisk']
curroffset += size + 1
if not lvmvols:
if swapsize:
@@ -313,13 +387,14 @@ def install_to_disk(imgpath):
if end > sectors:
end = sectors
parted.run('mkpart swap {}s {}s'.format(curroffset, end))
subprocess.check_call(['mkswap', get_partname(instdisk, volidx + 1)])
newswapdev = get_partname(instdisk, volidx + 1)
subprocess.check_call(['mkswap', newswapdev])
else:
parted.run('mkpart lvm {}s 100%'.format(curroffset))
lvmpart = get_partname(instdisk, volidx + 1)
subprocess.check_call(['pvcreate', '-ff', '-y', lvmpart])
subprocess.check_call(['vgcreate', 'localstorage', lvmpart])
vginfo = subprocess.check_output(['vgdisplay', 'localstorage', '--units', 'b']).decode('utf8')
subprocess.check_call(['vgcreate', vgname, lvmpart])
vginfo = subprocess.check_output(['vgdisplay', vgname, '--units', 'b']).decode('utf8')
vginfo = vginfo.split('\n')
pesize = 0
pes = 0
@@ -346,13 +421,17 @@ def install_to_disk(imgpath):
extents += 1
if vol['mount'] == '/':
lvname = 'root'
else:
lvname = vol['mount'].replace('/', '_')
subprocess.check_call(['lvcreate', '-l', '{}'.format(extents), '-y', '-n', lvname, 'localstorage'])
vol['targetdisk'] = '/dev/localstorage/{}'.format(lvname)
subprocess.check_call(['lvcreate', '-l', '{}'.format(extents), '-y', '-n', lvname, vgname])
vol['targetdisk'] = '/dev/{}/{}'.format(vgname, lvname)
if vol['mount'] == '/':
newrootdev = vol['targetdisk']
if swapsize:
subprocess.check_call(['lvcreate', '-y', '-l', '{}'.format(swapsize // pesize), '-n', 'swap', 'localstorage'])
subprocess.check_call(['mkswap', '/dev/localstorage/swap'])
subprocess.check_call(['lvcreate', '-y', '-l', '{}'.format(swapsize // pesize), '-n', 'swap', vgname])
subprocess.check_call(['mkswap', '/dev/{}/swap'.format(vgname)])
newswapdev = '/dev/{}/swap'.format(vgname)
os.makedirs('/run/imginst/targ')
for vol in allvols:
with open(vol['targetdisk'], 'wb') as partition:
@@ -426,4 +505,9 @@ def install_to_disk(imgpath):
if __name__ == '__main__':
install_to_disk(os.environ['mountsrc'])
try:
install_to_disk(os.environ['mountsrc'])
except Exception:
traceback.print_exc()
time.sleep(86400)
raise

View File

@@ -1,6 +1,6 @@
. /lib/dracut-lib.sh
confluent_whost=$confluent_mgr
if [[ "$confluent_whost" == *:* ]]; then
if [[ "$confluent_whost" == *:* ]] && [[ "$confluent_whost" != "["* ]]; then
confluent_whost="[$confluent_mgr]"
fi
mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay

View File

@@ -30,6 +30,7 @@ if [ ! -f /sysroot/tmp/installdisk ]; then
done
fi
lvm vgchange -a n
/sysroot/usr/sbin/wipefs -a /dev/$(cat /sysroot/tmp/installdisk)
udevadm control -e
if [ -f /sysroot/etc/lvm/devices/system.devices ]; then
rm /sysroot/etc/lvm/devices/system.devices

View File

@@ -16,6 +16,7 @@ if [ -z "$confluent_mgr" ]; then
fi
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
timedatectl set-timezone $(grep ^timezone: /etc/confluent/confluent.deploycfg|awk '{print $2}')
hostnamectl set-hostname $nodename
export nodename confluent_mgr confluent_profile
. /etc/confluent/functions
mkdir -p /var/log/confluent

View File

@@ -5,9 +5,16 @@
nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}')
confluent_apikey=$(cat /etc/confluent/confluent.apikey)
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export nodename confluent_mgr confluent_profile
confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg|awk '{print $2}')
if [ -z "$confluent_mgr" ] || [ "$confluent_mgr" == "null" ] || ! ping -c 1 $confluent_mgr >& /dev/null; then
confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}')
fi
confluent_websrv=$confluent_mgr
if [[ "$confluent_mgr" == *:* ]]; then
confluent_websrv="[$confluent_mgr]"
fi
export nodename confluent_mgr confluent_profile confluent_websrv
. /etc/confluent/functions
mkdir -p /var/log/confluent
chmod 700 /var/log/confluent
@@ -16,9 +23,9 @@ exec 2>> /var/log/confluent/confluent-post.log
chmod 600 /var/log/confluent/confluent-post.log
tail -f /var/log/confluent/confluent-post.log > /dev/console &
logshowpid=$!
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.service > /etc/systemd/system/firstboot.service
curl -f https://$confluent_websrv/confluent-public/os/$confluent_profile/scripts/firstboot.service > /etc/systemd/system/firstboot.service
mkdir -p /opt/confluent/bin
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /opt/confluent/bin/firstboot.sh
curl -f https://$confluent_websrv/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /opt/confluent/bin/firstboot.sh
chmod +x /opt/confluent/bin/firstboot.sh
systemctl enable firstboot
selinuxpolicy=$(grep ^SELINUXTYPE /etc/selinux/config |awk -F= '{print $2}')
@@ -33,7 +40,7 @@ run_remote_parts post.d
# Induce execution of remote configuration, e.g. ansible plays in ansible/post.d/
run_remote_config post.d
curl -sf -X POST -d 'status: staged' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus
curl -sf -X POST -d 'status: staged' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_websrv/confluent-api/self/updatestatus
kill $logshowpid

View File

@@ -1,4 +1,6 @@
#!/usr/bin/python3
import random
import time
import subprocess
import importlib
import tempfile
@@ -7,6 +9,7 @@ import os
import shutil
import pwd
import grp
import sys
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
@@ -227,9 +230,16 @@ def synchronize():
myips.append(addr)
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status >= 300:
sys.stderr.write("Error starting syncfiles - {}:\n".format(status))
sys.stderr.write(rsp.decode('utf8'))
sys.stderr.write('\n')
sys.stderr.flush()
return status
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(1+(2*random.random()))
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
@@ -277,10 +287,21 @@ def synchronize():
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
return status
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()
status = 202
while status not in (204, 200):
try:
status = synchronize()
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
status = 300
if status not in (204, 200):
time.sleep((random.random()*3)+2)

View File

@@ -174,6 +174,8 @@ dnsdomain=${dnsdomain#dnsdomain: }
echo search $dnsdomain >> /etc/resolv.conf
echo -n "Initializing ssh..."
ssh-keygen -A
mkdir -p /usr/share/empty.sshd
rm /etc/ssh/ssh_host_dsa_key*
for pubkey in /etc/ssh/ssh_host*key.pub; do
certfile=${pubkey/.pub/-cert.pub}
privfile=${pubkey%.pub}

View File

@@ -1,4 +1,6 @@
#!/usr/bin/python3
import random
import time
import subprocess
import importlib
import tempfile
@@ -7,6 +9,7 @@ import os
import shutil
import pwd
import grp
import sys
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
@@ -227,9 +230,16 @@ def synchronize():
myips.append(addr)
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status >= 300:
sys.stderr.write("Error starting syncfiles - {}:\n".format(status))
sys.stderr.write(rsp.decode('utf8'))
sys.stderr.write('\n')
sys.stderr.flush()
return status
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(1+(2*random.random()))
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
@@ -277,10 +287,21 @@ def synchronize():
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
return status
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()
status = 202
while status not in (204, 200):
try:
status = synchronize()
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
status = 300
if status not in (204, 200):
time.sleep((random.random()*3)+2)

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -116,7 +116,7 @@ fi
ready=0
while [ $ready = "0" ]; do
get_remote_apikey
if [[ $confluent_mgr == *:* ]]; then
if [[ $confluent_mgr == *:* ]] && [[ $confluent_mgr != "["* ]]; then
confluent_mgr="[$confluent_mgr]"
fi
tmperr=$(mktemp)

View File

@@ -1,4 +1,6 @@
#!/usr/bin/python3
import random
import time
import subprocess
import importlib
import tempfile
@@ -7,6 +9,7 @@ import os
import shutil
import pwd
import grp
import sys
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
@@ -227,9 +230,16 @@ def synchronize():
myips.append(addr)
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status >= 300:
sys.stderr.write("Error starting syncfiles - {}:\n".format(status))
sys.stderr.write(rsp.decode('utf8'))
sys.stderr.write('\n')
sys.stderr.flush()
return status
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(1+(2*random.random()))
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
@@ -277,10 +287,21 @@ def synchronize():
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
return status
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()
status = 202
while status not in (204, 200):
try:
status = synchronize()
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
status = 300
if status not in (204, 200):
time.sleep((random.random()*3)+2)

View File

@@ -10,6 +10,12 @@ dynamic behavior and replace with static configuration.
<hwclock>UTC</hwclock>
<timezone>%%TIMEZONE%%</timezone>
</timezone>
<firstboot>
<firstboot_enabled config:type="boolean">false</firstboot_enabled>
</firstboot>
<kdump>
<add_crash_kernel config:type="boolean">false</add_crash_kernel>
</kdump>
<general>
<self_update config:type="boolean">false</self_update>
<mode>

View File

@@ -1,4 +1,7 @@
#!/bin/sh
# WARNING
# be careful when editing files here as this script is called
# in parallel to other copy operations, so changes to files can be lost
discnum=$(basename $1)
if [ "$discnum" != 1 ]; then exit 0; fi
if [ -e $2/boot/kernel ]; then exit 0; fi

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -0,0 +1,3 @@
#!/usr/bin/bash
# remove online repos
grep -lE "baseurl=https?://download.opensuse.org" /etc/zypp/repos.d/*repo | xargs rm --

View File

@@ -1,4 +1,6 @@
#!/usr/bin/python3
import random
import time
import subprocess
import importlib
import tempfile
@@ -7,6 +9,7 @@ import os
import shutil
import pwd
import grp
import sys
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
@@ -227,9 +230,16 @@ def synchronize():
myips.append(addr)
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status >= 300:
sys.stderr.write("Error starting syncfiles - {}:\n".format(status))
sys.stderr.write(rsp.decode('utf8'))
sys.stderr.write('\n')
sys.stderr.flush()
return status
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(1+(2*random.random()))
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
@@ -277,10 +287,21 @@ def synchronize():
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
return status
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()
status = 202
while status not in (204, 200):
try:
status = synchronize()
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
status = 300
if status not in (204, 200):
time.sleep((random.random()*3)+2)

View File

@@ -10,6 +10,12 @@ dynamic behavior and replace with static configuration.
<hwclock>UTC</hwclock>
<timezone>%%TIMEZONE%%</timezone>
</timezone>
<firstboot>
<firstboot_enabled config:type="boolean">false</firstboot_enabled>
</firstboot>
<kdump>
<add_crash_kernel config:type="boolean">false</add_crash_kernel>
</kdump>
<general>
<self_update config:type="boolean">false</self_update>
<mode>

View File

@@ -1,4 +1,7 @@
#!/bin/sh
# WARNING
# be careful when editing files here as this script is called
# in parallel to other copy operations, so changes to files can be lost
discnum=$(basename $1)
if [ "$discnum" != 1 ]; then exit 0; fi
if [ -e $2/boot/kernel ]; then exit 0; fi

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -0,0 +1,3 @@
#!/usr/bin/bash
# remove online repos
grep -lE "baseurl=https?://download.opensuse.org" /etc/zypp/repos.d/*repo | xargs rm --

View File

@@ -1,4 +1,6 @@
#!/usr/bin/python3
import random
import time
import subprocess
import importlib
import tempfile
@@ -7,6 +9,7 @@ import os
import shutil
import pwd
import grp
import sys
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
@@ -227,9 +230,16 @@ def synchronize():
myips.append(addr)
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status >= 300:
sys.stderr.write("Error starting syncfiles - {}:\n".format(status))
sys.stderr.write(rsp.decode('utf8'))
sys.stderr.write('\n')
sys.stderr.flush()
return status
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(1+(2*random.random()))
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
@@ -277,10 +287,21 @@ def synchronize():
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
return status
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()
status = 202
while status not in (204, 200):
try:
status = synchronize()
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
status = 300
if status not in (204, 200):
time.sleep((random.random()*3)+2)

View File

@@ -1,4 +1,5 @@
if ! grep console= /proc/cmdline > /dev/null; then
mkdir -p /custom-installation
/opt/confluent/bin/autocons > /custom-installation/autocons.info
cons=$(cat /custom-installation/autocons.info)
if [ ! -z "$cons" ]; then

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -10,6 +10,7 @@ import stat
import struct
import sys
import subprocess
import traceback
bootuuid = None
@@ -206,6 +207,8 @@ def fixup(rootdir, vols):
partnum = re.search('(\d+)$', targdev).group(1)
targblock = re.search('(.*)\d+$', targdev).group(1)
if targblock:
if targblock.endswith('p') and 'nvme' in targblock:
targblock = targblock[:-1]
shimpath = subprocess.check_output(['find', os.path.join(rootdir, 'boot/efi'), '-name', 'shimx64.efi']).decode('utf8').strip()
shimpath = shimpath.replace(rootdir, '/').replace('/boot/efi', '').replace('//', '/').replace('/', '\\')
subprocess.check_call(['efibootmgr', '-c', '-d', targblock, '-l', shimpath, '--part', partnum])
@@ -422,5 +425,10 @@ def install_to_disk(imgpath):
if __name__ == '__main__':
install_to_disk(os.environ['mountsrc'])
try:
install_to_disk(os.environ['mountsrc'])
except Exception:
traceback.print_exc()
time.sleep(86400)
raise

View File

@@ -1,4 +1,6 @@
#!/usr/bin/python3
import random
import time
import subprocess
import importlib
import tempfile
@@ -7,6 +9,7 @@ import os
import shutil
import pwd
import grp
import sys
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
@@ -227,9 +230,16 @@ def synchronize():
myips.append(addr)
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status >= 300:
sys.stderr.write("Error starting syncfiles - {}:\n".format(status))
sys.stderr.write(rsp.decode('utf8'))
sys.stderr.write('\n')
sys.stderr.flush()
return status
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(1+(2*random.random()))
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
@@ -277,10 +287,21 @@ def synchronize():
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
return status
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()
status = 202
while status not in (204, 200):
try:
status = synchronize()
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
status = 300
if status not in (204, 200):
time.sleep((random.random()*3)+2)

View File

@@ -4,4 +4,5 @@ confluent_mgr=$(grep ^deploy_server $deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: $deploycfg|awk '{print $2}')
export deploycfg confluent_mgr confluent_profile
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/post.sh > /tmp/post.sh
. /tmp/post.sh
bash /tmp/post.sh
true

View File

@@ -2,7 +2,10 @@
echo "Confluent first boot is running"
HOME=$(getent passwd $(whoami)|cut -d: -f 6)
export HOME
seems a potentially relevant thing to put i... by Jarrod Johnson
(
exec >> /target/var/log/confluent/confluent-firstboot.log
exec 2>> /target/var/log/confluent/confluent-firstboot.log
chmod 600 /target/var/log/confluent/confluent-firstboot.log
cp -a /etc/confluent/ssh/* /etc/ssh/
systemctl restart sshd
rootpw=$(grep ^rootpassword: /etc/confluent/confluent.deploycfg |awk '{print $2}')
@@ -18,7 +21,10 @@ done
hostnamectl set-hostname $(grep ^NODENAME: /etc/confluent/confluent.info | awk '{print $2}')
touch /etc/cloud/cloud-init.disabled
source /etc/confluent/functions
confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}')
export confluent_mgr confluent_profile
run_remote_parts firstboot.d
run_remote_config firstboot.d
curl --capath /etc/confluent/tls -f -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" -X POST -d "status: complete" https://$confluent_mgr/confluent-api/self/updatestatus
) &
tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -8,7 +8,6 @@ chmod go-rwx /etc/confluent/*
for i in /custom-installation/ssh/*.ca; do
echo '@cert-authority *' $(cat $i) >> /target/etc/ssh/ssh_known_hosts
done
cp -a /etc/ssh/ssh_host* /target/etc/confluent/ssh/
cp -a /etc/ssh/sshd_config.d/confluent.conf /target/etc/confluent/ssh/sshd_config.d/
sshconf=/target/etc/ssh/ssh_config
@@ -19,10 +18,15 @@ echo 'Host *' >> $sshconf
echo ' HostbasedAuthentication yes' >> $sshconf
echo ' EnableSSHKeysign yes' >> $sshconf
echo ' HostbasedKeyTypes *ed25519*' >> $sshconf
cp /etc/confluent/functions /target/etc/confluent/functions
source /etc/confluent/functions
mkdir -p /target/var/log/confluent
cp /var/log/confluent/* /target/var/log/confluent/
(
exec >> /target/var/log/confluent/confluent-post.log
exec 2>> /target/var/log/confluent/confluent-post.log
chmod 600 /target/var/log/confluent/confluent-post.log
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /target/etc/confluent/firstboot.sh
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/functions > /target/etc/confluent/functions
source /target/etc/confluent/functions
chmod +x /target/etc/confluent/firstboot.sh
cp /tmp/allnodes /target/root/.shosts
cp /tmp/allnodes /target/etc/ssh/shosts.equiv
@@ -56,6 +60,7 @@ cp /custom-installation/confluent/bin/apiclient /target/opt/confluent/bin
mount -o bind /dev /target/dev
mount -o bind /proc /target/proc
mount -o bind /sys /target/sys
mount -o bind /sys/firmware/efi/efivars /target/sys/firmware/efi/efivars
if [ 1 = $updategrub ]; then
chroot /target update-grub
fi
@@ -83,6 +88,8 @@ chroot /target bash -c "source /etc/confluent/functions; run_remote_parts post.d
source /target/etc/confluent/functions
run_remote_config post
python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: staged'
umount /target/sys /target/dev /target/proc
) &
tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console

View File

@@ -1,5 +1,16 @@
#!/bin/bash
deploycfg=/custom-installation/confluent/confluent.deploycfg
mkdir -p /var/log/confluent
mkdir -p /opt/confluent/bin
mkdir -p /etc/confluent
cp /custom-installation/confluent/confluent.info /custom-installation/confluent/confluent.apikey /etc/confluent/
cat /custom-installation/tls/*.pem >> /etc/confluent/ca.pem
cp /custom-installation/confluent/bin/apiclient /opt/confluent/bin
cp $deploycfg /etc/confluent/
(
exec >> /var/log/confluent/confluent-pre.log
exec 2>> /var/log/confluent/confluent-pre.log
chmod 600 /var/log/confluent/confluent-pre.log
cryptboot=$(grep encryptboot: $deploycfg|sed -e 's/^encryptboot: //')
if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then
@@ -23,7 +34,17 @@ echo HostbasedAuthentication yes >> /etc/ssh/sshd_config.d/confluent.conf
echo HostbasedUsesNameFromPacketOnly yes >> /etc/ssh/sshd_config.d/confluent.conf
echo IgnoreRhosts no >> /etc/ssh/sshd_config.d/confluent.conf
systemctl restart sshd
mkdir -p /etc/confluent
export nodename confluent_profile confluent_mgr
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/functions > /etc/confluent/functions
. /etc/confluent/functions
run_remote_parts pre.d
curl -f -X POST -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $apikey" https://$confluent_mgr/confluent-api/self/nodelist > /tmp/allnodes
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/getinstalldisk > /custom-installation/getinstalldisk
python3 /custom-installation/getinstalldisk
if [ ! -e /tmp/installdisk ]; then
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/getinstalldisk > /custom-installation/getinstalldisk
python3 /custom-installation/getinstalldisk
fi
sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml
) &
tail --pid $! -n 0 -F /var/log/confluent/confluent-pre.log > /dev/console

View File

@@ -1,4 +1,6 @@
#!/usr/bin/python3
import random
import time
import subprocess
import importlib
import tempfile
@@ -7,6 +9,7 @@ import os
import shutil
import pwd
import grp
import sys
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
@@ -227,9 +230,16 @@ def synchronize():
myips.append(addr)
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status >= 300:
sys.stderr.write("Error starting syncfiles - {}:\n".format(status))
sys.stderr.write(rsp.decode('utf8'))
sys.stderr.write('\n')
sys.stderr.flush()
return status
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(1+(2*random.random()))
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
@@ -277,10 +287,21 @@ def synchronize():
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
return status
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()
status = 202
while status not in (204, 200):
try:
status = synchronize()
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
status = 300
if status not in (204, 200):
time.sleep((random.random()*3)+2)

View File

@@ -4,4 +4,5 @@ confluent_mgr=$(grep ^deploy_server $deploycfg|awk '{print $2}')
confluent_profile=$(grep ^profile: $deploycfg|awk '{print $2}')
export deploycfg confluent_mgr confluent_profile
curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/post.sh > /tmp/post.sh
. /tmp/post.sh
bash /tmp/post.sh
true

View File

@@ -3,5 +3,12 @@ sed -i 's/label: ubuntu/label: Ubuntu/' $2/profile.yaml && \
ln -s $1/casper/vmlinuz $2/boot/kernel && \
ln -s $1/casper/initrd $2/boot/initramfs/distribution && \
mkdir -p $2/boot/efi/boot && \
ln -s $1/EFI/boot/* $2/boot/efi/boot
if [ -d $1/EFI/boot/ ]; then
ln -s $1/EFI/boot/* $2/boot/efi/boot
elif [ -d $1/efi/boot/ ]; then
ln -s $1/efi/boot/* $2/boot/efi/boot
else
echo "Unrecogrized boot contents in media" >&2
exit 1
fi

View File

@@ -3,6 +3,8 @@ import os
class DiskInfo(object):
def __init__(self, devname):
if devname.startswith('nvme') and 'c' in devname:
raise Exception("Skipping multipath devname")
self.name = devname
self.wwn = None
self.path = None

View File

@@ -60,6 +60,7 @@ cp /custom-installation/confluent/bin/apiclient /target/opt/confluent/bin
mount -o bind /dev /target/dev
mount -o bind /proc /target/proc
mount -o bind /sys /target/sys
mount -o bind /sys/firmware/efi/efivars /target/sys/firmware/efi/efivars
if [ 1 = $updategrub ]; then
chroot /target update-grub
fi

View File

@@ -1,4 +1,6 @@
#!/usr/bin/python3
import random
import time
import subprocess
import importlib
import tempfile
@@ -7,6 +9,7 @@ import os
import shutil
import pwd
import grp
import sys
from importlib.machinery import SourceFileLoader
try:
apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module()
@@ -227,9 +230,16 @@ def synchronize():
myips.append(addr)
data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips})
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data)
if status >= 300:
sys.stderr.write("Error starting syncfiles - {}:\n".format(status))
sys.stderr.write(rsp.decode('utf8'))
sys.stderr.write('\n')
sys.stderr.flush()
return status
if status == 202:
lastrsp = ''
while status != 204:
time.sleep(1+(2*random.random()))
status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles')
if not isinstance(rsp, str):
rsp = rsp.decode('utf8')
@@ -277,10 +287,21 @@ def synchronize():
os.chmod(fname, int(opts[fname][opt], 8))
if uid != -1 or gid != -1:
os.chown(fname, uid, gid)
return status
finally:
shutil.rmtree(tmpdir)
shutil.rmtree(appendoncedir)
if __name__ == '__main__':
synchronize()
status = 202
while status not in (204, 200):
try:
status = synchronize()
except Exception as e:
sys.stderr.write(str(e))
sys.stderr.write('\n')
sys.stderr.flush()
status = 300
if status not in (204, 200):
time.sleep((random.random()*3)+2)

View File

@@ -0,0 +1 @@
ubuntu22.04

View File

@@ -0,0 +1 @@
ubuntu20.04-diskless

View File

@@ -22,6 +22,8 @@ import shutil
import eventlet.green.socket as socket
import eventlet
import greenlet
import pwd
import signal
def fprint(txt):
sys.stdout.write(txt)
@@ -109,6 +111,8 @@ def nics_missing_ipv6():
iname, state = comps[:2]
if iname == b'lo':
continue
if iname == b'virbr0':
continue
addrs = comps[2:]
hasv6 = False
hasv4 = False
@@ -157,6 +161,7 @@ def lookup_node(node):
if __name__ == '__main__':
ap = argparse.ArgumentParser(description='Run configuration checks for a system running confluent service')
ap.add_argument('-n', '--node', help='A node name to run node specific checks against')
ap.add_argument('-a', '--automation', help='Do checks against a deployed node for automation and syncfiles function', action='store_true')
args, extra = ap.parse_known_args(sys.argv)
if len(extra) > 1:
ap.print_help()
@@ -217,6 +222,7 @@ if __name__ == '__main__':
print('OK')
except subprocess.CalledProcessError:
emprint('Failed to load confluent automation key, syncfiles and profile ansible plays will not work (Example resolution: osdeploy initialize -a)')
os.kill(int(sshutil.agent_pid), signal.SIGTERM)
fprint('Checking for blocked insecure boot: ')
if insecure_boot_attempts():
emprint('Some nodes are attempting network boot using PXE or HTTP boot, but the node is not configured to allow this (Example resolution: nodegroupattrib everything deployment.useinsecureprotocols=firmware)')
@@ -274,13 +280,17 @@ if __name__ == '__main__':
cfg = configmanager.ConfigManager(None)
bootablev4nics = []
bootablev6nics = []
targsships = []
for nic in glob.glob("/sys/class/net/*/ifindex"):
idx = int(open(nic, "r").read())
nicname = nic.split('/')[-2]
ncfg = netutil.get_nic_config(cfg, args.node, ifidx=idx)
if ncfg['ipv4_address']:
targsships.append(ncfg['ipv4_address'])
if ncfg['ipv4_address'] or ncfg['ipv4_method'] == 'dhcp':
bootablev4nics.append(nicname)
if ncfg['ipv6_address']:
targsships.append(ncfg['ipv6_address'])
bootablev6nics.append(nicname)
if bootablev4nics:
print("{} appears to have network configuration suitable for IPv4 deployment via: {}".format(args.node, ",".join(bootablev4nics)))
@@ -311,6 +321,34 @@ if __name__ == '__main__':
emprint('Name resolution failed for node, it is normally a good idea for the node name to resolve to an IP')
if result:
print("OK")
if args.automation:
print(f'Checking confluent automation access to {args.node}...')
child = os.fork()
if child > 0:
pid, extcode = os.waitpid(child, 0)
else:
sshutil.ready_keys = {}
sshutil.agent_pid = None
cuser = pwd.getpwnam('confluent')
os.setgid(cuser.pw_gid)
os.setuid(cuser.pw_uid)
sshutil.prep_ssh_key('/etc/confluent/ssh/automation')
for targ in targsships:
srun = subprocess.run(
['ssh', '-Tn', '-o', 'BatchMode=yes', '-l', 'root',
'-o', 'StrictHostKeyChecking=yes', targ, 'true'],
stdin=subprocess.DEVNULL, stderr=subprocess.PIPE)
if srun.returncode == 0:
print(f'Confluent automation access to {targ} seems OK')
else:
if b'Host key verification failed' in srun.stderr:
emprint(f'Confluent ssh unable to verify host key for {targ}, check /etc/ssh/ssh_known_hosts. (Example resolution: osdeploy initialize -k)')
elif b'ermission denied' in srun.stderr:
emprint(f'Confluent user unable to ssh in to {targ}, check /root/.ssh/authorized_keys on the target system versus /etc/confluent/ssh/automation.pub (Example resolution: osdeploy initialize -a)')
else:
emprint('Unknown error attempting confluent automation ssh:')
sys.stderr.buffer.write(srun.stderr)
os.kill(int(sshutil.agent_pid), signal.SIGTERM)
else:
print("Skipping node checks, no node specified (Example: confluent_selfcheck -n n1)")
# possible checks:

View File

@@ -1,4 +1,4 @@
#!/usr/bin/python2
#!/usr/bin/python3
__author__ = 'jjohnson2,bfinley'
@@ -49,8 +49,11 @@ def main(args):
wiz.add_argument('-p', help='Copy in TFTP contents required for PXE support', action='store_true')
wiz.add_argument('-i', help='Interactively prompt for behaviors', action='store_true')
wiz.add_argument('-l', help='Set up local management node to allow login from managed nodes', action='store_true')
osip = sp.add_parser('importcheck', help='Check import of an OS image from an ISO image')
osip.add_argument('imagefile', help='File to use for source of importing')
osip = sp.add_parser('import', help='Import an OS image from an ISO image')
osip.add_argument('imagefile', help='File to use for source of importing')
osip.add_argument('-n', help='Specific a custom distribution name')
upb = sp.add_parser(
'updateboot',
help='Push profile.yaml of the named profile data into boot assets as appropriate')
@@ -63,7 +66,9 @@ def main(args):
if cmdset.command == 'list':
return oslist()
if cmdset.command == 'import':
return osimport(cmdset.imagefile)
return osimport(cmdset.imagefile, custname=cmdset.n)
if cmdset.command == 'importcheck':
return osimport(cmdset.imagefile, checkonly=True)
if cmdset.command == 'initialize':
return initialize(cmdset)
if cmdset.command == 'updateboot':
@@ -72,6 +77,12 @@ def main(args):
return rebase(cmdset.profile)
ap.print_help()
def symlinkp(src, trg):
try:
os.symlink(src, trg)
except Exception as e:
if e.errno != 17:
raise
def initialize_genesis():
if not os.path.exists('/opt/confluent/genesis/x86_64/boot/kernel'):
@@ -89,30 +100,33 @@ def initialize_genesis():
return retval[1]
retcode = 0
try:
util.mkdirp('/var/lib/confluent', 0o755)
if hasconfluentuser:
os.chown('/var/lib/confluent', hasconfluentuser.pw_uid, -1)
os.setgid(hasconfluentuser.pw_gid)
os.setuid(hasconfluentuser.pw_uid)
os.umask(0o22)
os.makedirs('/var/lib/confluent/public/os/genesis-x86_64/boot/efi/boot', 0o755)
os.makedirs('/var/lib/confluent/public/os/genesis-x86_64/boot/initramfs', 0o755)
os.symlink('/opt/confluent/genesis/x86_64/boot/efi/boot/BOOTX64.EFI',
util.mkdirp('/var/lib/confluent/public/os/genesis-x86_64/boot/efi/boot', 0o755)
util.mkdirp('/var/lib/confluent/public/os/genesis-x86_64/boot/initramfs', 0o755)
symlinkp('/opt/confluent/genesis/x86_64/boot/efi/boot/BOOTX64.EFI',
'/var/lib/confluent/public/os/genesis-x86_64/boot/efi/boot/BOOTX64.EFI')
os.symlink('/opt/confluent/genesis/x86_64/boot/efi/boot/grubx64.efi',
symlinkp('/opt/confluent/genesis/x86_64/boot/efi/boot/grubx64.efi',
'/var/lib/confluent/public/os/genesis-x86_64/boot/efi/boot/grubx64.efi')
os.symlink('/opt/confluent/genesis/x86_64/boot/initramfs/distribution',
symlinkp('/opt/confluent/genesis/x86_64/boot/initramfs/distribution',
'/var/lib/confluent/public/os/genesis-x86_64/boot/initramfs/distribution')
os.symlink('/var/lib/confluent/public/site/initramfs.cpio',
symlinkp('/var/lib/confluent/public/site/initramfs.cpio',
'/var/lib/confluent/public/os/genesis-x86_64/boot/initramfs/site.cpio')
os.symlink('/opt/confluent/lib/osdeploy/genesis/initramfs/addons.cpio',
symlinkp('/opt/confluent/lib/osdeploy/genesis/initramfs/addons.cpio',
'/var/lib/confluent/public/os/genesis-x86_64/boot/initramfs/addons.cpio')
os.symlink('/opt/confluent/genesis/x86_64/boot/kernel',
symlinkp('/opt/confluent/genesis/x86_64/boot/kernel',
'/var/lib/confluent/public/os/genesis-x86_64/boot/kernel')
shutil.copytree('/opt/confluent/lib/osdeploy/genesis/profiles/default/ansible/',
'/var/lib/confluent/public/os/genesis-x86_64/ansible/')
shutil.copytree('/opt/confluent/lib/osdeploy/genesis/profiles/default/scripts/',
'/var/lib/confluent/public/os/genesis-x86_64/scripts/')
shutil.copyfile('/opt/confluent/lib/osdeploy/genesis/profiles/default/profile.yaml',
'/var/lib/confluent/public/os/genesis-x86_64/profile.yaml')
if not os.path.exists('/var/lib/confluent/public/os/genesis-x86_64/ansible/'):
shutil.copytree('/opt/confluent/lib/osdeploy/genesis/profiles/default/ansible/',
'/var/lib/confluent/public/os/genesis-x86_64/ansible/')
shutil.copytree('/opt/confluent/lib/osdeploy/genesis/profiles/default/scripts/',
'/var/lib/confluent/public/os/genesis-x86_64/scripts/')
shutil.copyfile('/opt/confluent/lib/osdeploy/genesis/profiles/default/profile.yaml',
'/var/lib/confluent/public/os/genesis-x86_64/profile.yaml')
except Exception as e:
sys.stderr.write(str(e) + '\n')
retcode = 1
@@ -373,9 +387,14 @@ def initialize(cmdset):
for rsp in c.read('/uuid'):
uuid = rsp.get('uuid', {}).get('value', None)
if uuid:
with open('confluent_uuid', 'w') as uuidout:
uuidout.write(uuid)
uuidout.write('\n')
oum = os.umask(0o11)
try:
with open('confluent_uuid', 'w') as uuidout:
uuidout.write(uuid)
uuidout.write('\n')
os.chmod('confluent_uuid', 0o644)
finally:
os.umask(oum)
totar.append('confluent_uuid')
topack.append('confluent_uuid')
if os.path.exists('ssh'):
@@ -403,7 +422,17 @@ def initialize(cmdset):
if res:
sys.stderr.write('Error occurred while packing site initramfs')
sys.exit(1)
os.rename(tmpname, '/var/lib/confluent/public/site/initramfs.cpio')
oum = os.umask(0o22)
try:
os.rename(tmpname, '/var/lib/confluent/public/site/initramfs.cpio')
os.chmod('/var/lib/confluent/public/site/initramfs.cpio', 0o644)
finally:
os.umask(oum)
oum = os.umask(0o22)
try:
os.chmod('/var/lib/confluent/public/site/initramfs.cpio', 0o644)
finally:
os.umask(oum)
if cmdset.g:
updateboot('genesis-x86_64')
if totar:
@@ -411,6 +440,11 @@ def initialize(cmdset):
tarcmd = ['tar', '-czf', tmptarname] + totar
subprocess.check_call(tarcmd)
os.rename(tmptarname, '/var/lib/confluent/public/site/initramfs.tgz')
oum = os.umask(0o22)
try:
os.chmod('/var/lib/confluent/public/site/initramfs.tgz', 0o644)
finally:
os.umask(0o22)
os.chdir(opath)
print('Site initramfs content packed successfully')
@@ -421,6 +455,9 @@ def initialize(cmdset):
def updateboot(profilename):
if not os.path.exists('/var/lib/confluent/public/site/initramfs.cpio'):
emprint('Must generate site content first (TLS (-t) and/or SSH (-s))')
return 1
c = client.Command()
for rsp in c.update('/deployment/profiles/{0}'.format(profilename),
{'updateboot': 1}):
@@ -464,7 +501,7 @@ def oslist():
print("")
def osimport(imagefile):
def osimport(imagefile, checkonly=False, custname=None):
c = client.Command()
imagefile = os.path.abspath(imagefile)
if c.unixdomain:
@@ -475,11 +512,33 @@ def osimport(imagefile):
pass
importing = False
shortname = None
for rsp in c.create('/deployment/importing/', {'filename': imagefile}):
apipath = '/deployment/importing/'
if checkonly:
apipath = '/deployment/fingerprint/'
apiargs = {'filename': imagefile}
if custname:
apiargs['custname'] = custname
for rsp in c.create(apipath, apiargs):
if 'target' in rsp:
importing = True
shortname = rsp['name']
print('Importing from {0} to {1}'.format(imagefile, rsp['target']))
elif 'targetpath' in rsp:
tpath = rsp.get('targetpath', None)
tname = rsp.get('name', None)
oscat = rsp.get('oscategory', None)
if tpath:
print('Detected target directory: ' + tpath)
if tname:
print('Detected distribution name: ' + tname)
if oscat:
print('Detected OS category: ' + oscat)
for err in rsp.get('errors', []):
sys.stderr.write('Error: ' + err + '\n')
elif 'error' in rsp:
sys.stderr.write(rsp['error'] + '\n')
sys.exit(rsp.get('errorcode', 1))
else:
print(repr(rsp))
try:

View File

@@ -95,27 +95,29 @@ def assure_tls_ca():
os.makedirs(os.path.dirname(fname))
except OSError as e:
if e.errno != 17:
os.seteuid(ouid)
raise
try:
shutil.copy2('/etc/confluent/tls/cacert.pem', fname)
hv, _ = util.run(
['openssl', 'x509', '-in', '/etc/confluent/tls/cacert.pem', '-hash', '-noout'])
if not isinstance(hv, str):
hv = hv.decode('utf8')
hv = hv.strip()
hashname = '/var/lib/confluent/public/site/tls/{0}.0'.format(hv)
certname = '{0}.pem'.format(collective.get_myname())
for currname in os.listdir('/var/lib/confluent/public/site/tls/'):
currname = os.path.join('/var/lib/confluent/public/site/tls/', currname)
if currname.endswith('.0'):
try:
realname = os.readlink(currname)
if realname == certname:
os.unlink(currname)
except OSError:
pass
os.symlink(certname, hashname)
finally:
os.seteuid(ouid)
shutil.copy2('/etc/confluent/tls/cacert.pem', fname)
hv, _ = util.run(
['openssl', 'x509', '-in', '/etc/confluent/tls/cacert.pem', '-hash', '-noout'])
if not isinstance(hv, str):
hv = hv.decode('utf8')
hv = hv.strip()
hashname = '/var/lib/confluent/public/site/tls/{0}.0'.format(hv)
certname = '{0}.pem'.format(collective.get_myname())
for currname in os.listdir('/var/lib/confluent/public/site/tls/'):
currname = os.path.join('/var/lib/confluent/public/site/tls/', currname)
if currname.endswith('.0'):
try:
realname = os.readlink(currname)
if realname == certname:
os.unlink(currname)
except OSError:
pass
os.symlink(certname, hashname)
def substitute_cfg(setting, key, val, newval, cfgfile, line):
if key.strip() == setting:
@@ -204,7 +206,7 @@ def create_simple_ca(keyout, certout):
finally:
os.remove(tmpconfig)
def create_certificate(keyout=None, certout=None):
def create_certificate(keyout=None, certout=None, csrout=None):
if not keyout:
keyout, certout = get_certificate_paths()
if not keyout:
@@ -212,9 +214,10 @@ def create_certificate(keyout=None, certout=None):
assure_tls_ca()
shortname = socket.gethostname().split('.')[0]
longname = shortname # socket.getfqdn()
subprocess.check_call(
['openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out',
keyout])
if not csrout:
subprocess.check_call(
['openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out',
keyout])
san = ['IP:{0}'.format(x) for x in get_ip_addresses()]
# It is incorrect to put IP addresses as DNS type. However
# there exists non-compliant clients that fail with them as IP
@@ -227,21 +230,34 @@ def create_certificate(keyout=None, certout=None):
os.close(tmphdl)
tmphdl, extconfig = tempfile.mkstemp()
os.close(tmphdl)
tmphdl, csrout = tempfile.mkstemp()
os.close(tmphdl)
needcsr = False
if csrout is None:
needcsr = True
tmphdl, csrout = tempfile.mkstemp()
os.close(tmphdl)
shutil.copy2(sslcfg, tmpconfig)
serialnum = '0x' + ''.join(['{:02x}'.format(x) for x in bytearray(os.urandom(20))])
try:
with open(tmpconfig, 'a') as cfgfile:
cfgfile.write('\n[SAN]\nsubjectAltName={0}'.format(san))
with open(extconfig, 'a') as cfgfile:
cfgfile.write('\nbasicConstraints=CA:false\nsubjectAltName={0}'.format(san))
subprocess.check_call([
'openssl', 'req', '-new', '-key', keyout, '-out', csrout, '-subj',
'/CN={0}'.format(longname),
'-extensions', 'SAN', '-config', tmpconfig
])
if needcsr:
with open(tmpconfig, 'a') as cfgfile:
cfgfile.write('\n[SAN]\nsubjectAltName={0}'.format(san))
with open(extconfig, 'a') as cfgfile:
cfgfile.write('\nbasicConstraints=CA:false\nsubjectAltName={0}'.format(san))
subprocess.check_call([
'openssl', 'req', '-new', '-key', keyout, '-out', csrout, '-subj',
'/CN={0}'.format(longname),
'-extensions', 'SAN', '-config', tmpconfig
])
else:
# when used manually, allow the csr SAN to stand
# may add explicit subj/SAN argument, in which case we would skip copy
with open(tmpconfig, 'a') as cfgfile:
cfgfile.write('\ncopy_extensions=copy\n')
with open(extconfig, 'a') as cfgfile:
cfgfile.write('\nbasicConstraints=CA:false\n')
if os.path.exists('/etc/confluent/tls/cakey.pem'):
# simple style CA in effect, make a random serial number and
# hope for the best, and accept inability to backdate the cert
serialnum = '0x' + ''.join(['{:02x}'.format(x) for x in bytearray(os.urandom(20))])
subprocess.check_call([
'openssl', 'x509', '-req', '-in', csrout,
'-CA', '/etc/confluent/tls/cacert.pem',
@@ -250,20 +266,40 @@ def create_certificate(keyout=None, certout=None):
'-extfile', extconfig
])
else:
# we moved to a 'proper' CA, mainly for access to backdating
# start of certs for finicky system clocks
# this also provides a harder guarantee of serial uniqueness, but
# not of practical consequence (160 bit random value is as good as
# guaranteed unique)
# downside is certificate generation is serialized
cacfgfile = '/etc/confluent/tls/ca/openssl.cfg'
if needcsr:
tmphdl, tmpcafile = tempfile.mkstemp()
shutil.copy2(cacfgfile, tmpcafile)
os.close(tmphdl)
cacfgfile = tmpcafile
# with realcalock: # if we put it in server, we must lock it
subprocess.check_call([
'openssl', 'ca', '-config', '/etc/confluent/tls/ca/openssl.cfg',
'openssl', 'ca', '-config', cacfgfile,
'-in', csrout, '-out', certout, '-batch', '-notext',
'-startdate', '19700101010101Z', '-enddate', '21000101010101Z',
'-extfile', extconfig
])
finally:
os.remove(tmpconfig)
os.remove(csrout)
os.remove(extconfig)
if needcsr:
os.remove(csrout)
print(extconfig) # os.remove(extconfig)
if __name__ == '__main__':
import sys
outdir = os.getcwd()
keyout = os.path.join(outdir, 'key.pem')
certout = os.path.join(outdir, 'cert.pem')
create_certificate(keyout, certout)
certout = os.path.join(outdir, sys.argv[2] + 'cert.pem')
csrout = None
try:
csrout = sys.argv[1]
except IndexError:
csrout = None
create_certificate(keyout, certout, csrout)

View File

@@ -252,10 +252,12 @@ def _rpc_master_rename_nodegroups(tenant, renamemap):
def _rpc_master_clear_node_attributes(tenant, nodes, attributes):
ConfigManager(tenant).clear_node_attributes(nodes, attributes)
warnings = []
ConfigManager(tenant).clear_node_attributes(nodes, attributes, warnings)
return warnings
def _rpc_clear_node_attributes(tenant, nodes, attributes):
def _rpc_clear_node_attributes(tenant, nodes, attributes): # master has to do the warnings
ConfigManager(tenant)._true_clear_node_attributes(nodes, attributes)
@@ -348,9 +350,9 @@ def exec_on_leader(function, *args):
rpclen = len(rpcpayload)
cfgleader.sendall(struct.pack('!Q', rpclen))
cfgleader.sendall(rpcpayload)
_pendingchangesets[xid].wait()
retv = _pendingchangesets[xid].wait()
del _pendingchangesets[xid]
return
return retv
def exec_on_followers(fnname, *args):
@@ -714,8 +716,9 @@ def relay_slaved_requests(name, listener):
exc = None
if not (rpc['function'].startswith('_rpc_') or rpc['function'].endswith('_collective_member')):
raise Exception('Unsupported function {0} called'.format(rpc['function']))
retv = None
try:
globals()[rpc['function']](*rpc['args'])
retv = globals()[rpc['function']](*rpc['args'])
except ValueError as ve:
exc = ['ValueError', str(ve)]
except Exception as e:
@@ -723,7 +726,7 @@ def relay_slaved_requests(name, listener):
exc = ['Exception', str(e)]
if 'xid' in rpc:
res = _push_rpc(listener, msgpack.packb({'xid': rpc['xid'],
'exc': exc}, use_bin_type=False))
'exc': exc, 'ret': retv}, use_bin_type=False))
if not res:
break
try:
@@ -929,7 +932,7 @@ def follow_channel(channel):
exc = Exception(excstr)
_pendingchangesets[rpc['xid']].send_exception(exc)
else:
_pendingchangesets[rpc['xid']].send()
_pendingchangesets[rpc['xid']].send(rpc.get('ret', None))
if 'quorum' in rpc:
_hasquorum = rpc['quorum']
res = _push_rpc(channel, b'') # use null as ACK
@@ -1089,6 +1092,11 @@ class _ExpressionFormat(string.Formatter):
self._nodename = nodename
self._numbers = None
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth,
auto_arg_index=False):
return super()._vformat(format_string, args, kwargs, used_args,
recursion_depth, auto_arg_index)
def get_field(self, field_name, args, kwargs):
return field_name, field_name
@@ -2197,16 +2205,19 @@ class ConfigManager(object):
self._notif_attribwatchers(changeset)
self._bg_sync_to_file()
def clear_node_attributes(self, nodes, attributes):
def clear_node_attributes(self, nodes, attributes, warnings=None):
if cfgleader:
return exec_on_leader('_rpc_master_clear_node_attributes',
mywarnings = exec_on_leader('_rpc_master_clear_node_attributes',
self.tenant, nodes, attributes)
if mywarnings and warnings is not None:
warnings.extend(mywarnings)
return
if cfgstreams:
exec_on_followers('_rpc_clear_node_attributes', self.tenant,
nodes, attributes)
self._true_clear_node_attributes(nodes, attributes)
self._true_clear_node_attributes(nodes, attributes, warnings)
def _true_clear_node_attributes(self, nodes, attributes):
def _true_clear_node_attributes(self, nodes, attributes, warnings=None):
# accumulate all changes into a changeset and push in one go
changeset = {}
realattributes = []
@@ -2229,8 +2240,17 @@ class ConfigManager(object):
# delete it and check for inheritence to backfil data
del nodek[attrib]
self._do_inheritance(nodek, attrib, node, changeset)
if warnings is not None:
if attrib in nodek:
warnings.append('The attribute "{}" was defined specifically for the node and clearing now has a value inherited from the group "{}"'.format(attrib, nodek[attrib]['inheritedfrom']))
_addchange(changeset, node, attrib)
_mark_dirtykey('nodes', node, self.tenant)
elif attrib in nodek:
if warnings is not None:
warnings.append('The attribute "{0}" is inherited from group "{1}", leaving the inherited value alone (use "{0}=" with no value to explicitly blank the value if desired)'.format(attrib, nodek[attrib]['inheritedfrom']))
else:
if warnings is not None:
warnings.append('Attribute "{}" is either already cleared, or does not match a defined attribute (if referencing an attribute group, try a wildcard)'.format(attrib))
if ('_expressionkeys' in nodek and
attrib in nodek['_expressionkeys']):
recalcexpressions = True

View File

@@ -49,7 +49,6 @@ _handled_consoles = {}
_tracelog = None
_bufferdaemon = None
_bufferlock = None
try:
range = xrange
@@ -62,39 +61,38 @@ def chunk_output(output, n):
yield output[i:i + n]
def get_buffer_output(nodename):
out = _bufferdaemon.stdin
instream = _bufferdaemon.stdout
out = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
out.setsockopt(socket.SOL_SOCKET, socket.SO_PASSCRED, 1)
out.connect("\x00confluent-vtbuffer")
if not isinstance(nodename, bytes):
nodename = nodename.encode('utf8')
outdata = bytearray()
with _bufferlock:
out.write(struct.pack('I', len(nodename)))
out.write(nodename)
out.flush()
select.select((instream,), (), (), 30)
while not outdata or outdata[-1]:
try:
chunk = os.read(instream.fileno(), 128)
except IOError:
chunk = None
if chunk:
outdata.extend(chunk)
else:
select.select((instream,), (), (), 0)
return bytes(outdata[:-1])
out.send(struct.pack('I', len(nodename)))
out.send(nodename)
select.select((out,), (), (), 30)
while not outdata or outdata[-1]:
try:
chunk = os.read(out.fileno(), 128)
except IOError:
chunk = None
if chunk:
outdata.extend(chunk)
else:
select.select((out,), (), (), 0)
return bytes(outdata[:-1])
def send_output(nodename, output):
if not isinstance(nodename, bytes):
nodename = nodename.encode('utf8')
with _bufferlock:
_bufferdaemon.stdin.write(struct.pack('I', len(nodename) | (1 << 29)))
_bufferdaemon.stdin.write(nodename)
_bufferdaemon.stdin.flush()
for chunk in chunk_output(output, 8192):
_bufferdaemon.stdin.write(struct.pack('I', len(chunk) | (2 << 29)))
_bufferdaemon.stdin.write(chunk)
_bufferdaemon.stdin.flush()
out = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
out.setsockopt(socket.SOL_SOCKET, socket.SO_PASSCRED, 1)
out.connect("\x00confluent-vtbuffer")
out.send(struct.pack('I', len(nodename) | (1 << 29)))
out.send(nodename)
for chunk in chunk_output(output, 8192):
out.send(struct.pack('I', len(chunk) | (2 << 29)))
out.send(chunk)
def _utf8_normalize(data, decoder):
# first we give the stateful decoder a crack at the byte stream,
@@ -175,6 +173,9 @@ class ConsoleHandler(object):
self.connectstate = 'connecting'
eventlet.spawn(self._connect)
def resize(self, width, height):
return None
def _get_retry_time(self):
clustsize = len(self.cfgmgr._cfgstore['nodes'])
self._retrytime = self._retrytime * 2 + 1
@@ -600,15 +601,10 @@ def _start_tenant_sessions(cfm):
def initialize():
global _tracelog
global _bufferdaemon
global _bufferlock
_bufferlock = semaphore.Semaphore()
_tracelog = log.Logger('trace')
_bufferdaemon = subprocess.Popen(
['/opt/confluent/bin/vtbufferd'], bufsize=0, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
fl = fcntl.fcntl(_bufferdaemon.stdout.fileno(), fcntl.F_GETFL)
fcntl.fcntl(_bufferdaemon.stdout.fileno(),
fcntl.F_SETFL, fl | os.O_NONBLOCK)
['/opt/confluent/bin/vtbufferd', 'confluent-vtbuffer'], bufsize=0, stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL)
def start_console_sessions():
configmodule.hook_new_configmanagers(_start_tenant_sessions)
@@ -622,6 +618,8 @@ def connect_node(node, configmanager, username=None, direct=True, width=80,
myname = collective.get_myname()
if myc and myc != collective.get_myname() and direct:
minfo = configmodule.get_collective_member(myc)
if not minfo:
raise Exception('Unable to get collective member for {}'.format(node))
return ProxyConsole(node, minfo, myname, configmanager, username,
width, height)
consk = (node, configmanager.tenant)

View File

@@ -44,6 +44,7 @@ import confluent.discovery.core as disco
import confluent.interface.console as console
import confluent.exceptions as exc
import confluent.messages as msg
import confluent.mountmanager as mountmanager
import confluent.networking.macmap as macmap
import confluent.noderange as noderange
import confluent.osimage as osimage
@@ -69,6 +70,7 @@ import os
import eventlet.green.socket as socket
import struct
import sys
import yaml
pluginmap = {}
dispatch_plugins = (b'ipmi', u'ipmi', b'redfish', u'redfish', b'tsmsol', u'tsmsol', b'geist', u'geist', b'deltapdu', u'deltapdu', b'eatonpdu', u'eatonpdu', b'affluent', u'affluent', b'cnos', u'cnos')
@@ -159,7 +161,7 @@ def _merge_dict(original, custom):
rootcollections = ['deployment/', 'discovery/', 'events/', 'networking/',
'noderange/', 'nodes/', 'nodegroups/', 'usergroups/' ,
'noderange/', 'nodes/', 'nodegroups/', 'storage/', 'usergroups/' ,
'users/', 'uuid', 'version']
@@ -169,6 +171,14 @@ class PluginRoute(object):
def handle_storage(configmanager, inputdata, pathcomponents, operation):
if len(pathcomponents) == 1:
yield msg.ChildCollection('remote/')
return
if pathcomponents[1] == 'remote':
for rsp in mountmanager.handle_request(configmanager, inputdata, pathcomponents[2:], operation):
yield rsp
def handle_deployment(configmanager, inputdata, pathcomponents,
operation):
if len(pathcomponents) == 1:
@@ -191,8 +201,19 @@ def handle_deployment(configmanager, inputdata, pathcomponents,
for prof in osimage.list_profiles():
yield msg.ChildCollection(prof + '/')
return
if len(pathcomponents) == 3:
profname = pathcomponents[-1]
if len(pathcomponents) >= 3:
profname = pathcomponents[2]
if len(pathcomponents) == 4:
if operation == 'retrieve':
if len(pathcomponents) == 4 and pathcomponents[-1] == 'info':
with open('/var/lib/confluent/public/os/{}/profile.yaml'.format(profname)) as profyaml:
profinfo = yaml.safe_load(profyaml)
profinfo['name'] = profname
yield msg.KeyValueData(profinfo)
return
elif len(pathcomponents) == 3:
if operation == 'retrieve':
yield msg.ChildCollection('info')
if operation == 'update':
if 'updateboot' in inputdata:
osimage.update_boot(profname)
@@ -208,6 +229,17 @@ def handle_deployment(configmanager, inputdata, pathcomponents,
for cust in customized:
yield msg.KeyValueData({'customized': cust})
return
if pathcomponents[1] == 'fingerprint':
if operation == 'create':
importer = osimage.MediaImporter(inputdata['filename'], configmanager, checkonly=True)
medinfo = {
'targetpath': importer.targpath,
'name': importer.osname,
'oscategory': importer.oscategory,
'errors': importer.errors,
}
yield msg.KeyValueData(medinfo)
return
if pathcomponents[1] == 'importing':
if len(pathcomponents) == 2 or not pathcomponents[-1]:
if operation == 'retrieve':
@@ -215,8 +247,12 @@ def handle_deployment(configmanager, inputdata, pathcomponents,
yield imp
return
elif operation == 'create':
importer = osimage.MediaImporter(inputdata['filename'],
configmanager)
if inputdata.get('custname', None):
importer = osimage.MediaImporter(inputdata['filename'],
configmanager, inputdata['custname'])
else:
importer = osimage.MediaImporter(inputdata['filename'],
configmanager)
yield msg.KeyValueData({'target': importer.targpath,
'name': importer.importkey})
return
@@ -1245,6 +1281,9 @@ def handle_path(path, operation, configmanager, inputdata=None, autostrip=True):
elif pathcomponents[0] == 'deployment':
return handle_deployment(configmanager, inputdata, pathcomponents,
operation)
elif pathcomponents[0] == 'storage':
return handle_storage(configmanager, inputdata, pathcomponents,
operation)
elif pathcomponents[0] == 'nodegroups':
return handle_nodegroup_request(configmanager, inputdata,
pathcomponents,

View File

@@ -127,14 +127,15 @@ class CredServer(object):
if hmacval != hmac.new(hmackey, etok, hashlib.sha256).digest():
client.close()
return
cfgupdate = {nodename: {'crypted.selfapikey': {'hashvalue': echotoken}, 'deployment.sealedapikey': '', 'deployment.apiarmed': ''}}
if hmackey and apiarmed != 'continuous':
self.cfm.clear_node_attributes([nodename], ['secret.selfapiarmtoken'])
if apiarmed == 'continuous':
del cfgupdate[nodename]['deployment.apiarmed']
cfgupdate = {nodename: {'crypted.selfapikey': {'hashvalue': echotoken}}}
self.cfm.set_node_attributes(cfgupdate)
client.recv(2) # drain end of message
client.send(b'\x05\x00') # report success
if hmackey and apiarmed != 'continuous':
self.cfm.clear_node_attributes([nodename], ['secret.selfapiarmtoken'])
if apiarmed != 'continuous':
tokclear = {nodename: {'deployment.sealedapikey': '', 'deployment.apiarmed': ''}}
self.cfm.set_node_attributes(tokclear)
finally:
client.close()

View File

@@ -247,6 +247,10 @@ class NodeHandler(immhandler.NodeHandler):
if rsp.status == 200:
pwdchanged = True
password = newpassword
wc.set_header('Authorization', 'Bearer ' + rspdata['access_token'])
if '_csrf_token' in wc.cookies:
wc.set_header('X-XSRF-TOKEN', wc.cookies['_csrf_token'])
wc.grab_json_response_with_status('/api/providers/logout')
else:
if rspdata.get('locktime', 0) > 0:
raise LockedUserException(
@@ -280,6 +284,7 @@ class NodeHandler(immhandler.NodeHandler):
rsp.read()
if rsp.status != 200:
return (None, None)
wc.grab_json_response_with_status('/api/providers/logout')
self._currcreds = (username, newpassword)
wc.set_basic_credentials(username, newpassword)
pwdchanged = True
@@ -434,6 +439,7 @@ class NodeHandler(immhandler.NodeHandler):
'/api/function',
{'USER_UserModify': '{0},{1},,1,4,0,0,0,0,,8,,,'.format(uid, username)})
if status == 200 and rsp.get('return', 0) == 13:
wc.grab_json_response('/api/providers/logout')
wc.set_basic_credentials(self._currcreds[0], self._currcreds[1])
status = 503
while status != 200:
@@ -442,10 +448,13 @@ class NodeHandler(immhandler.NodeHandler):
{'UserName': username}, method='PATCH')
if status != 200:
rsp = json.loads(rsp)
if rsp.get('error', {}).get('code', 'Unknown') in ('Base.1.8.GeneralError', 'Base.1.12.GeneralError'):
eventlet.sleep(10)
if rsp.get('error', {}).get('code', 'Unknown') in ('Base.1.8.GeneralError', 'Base.1.12.GeneralError', 'Base.1.14.GeneralError'):
eventlet.sleep(4)
else:
break
self.tmppasswd = None
self._currcreds = (username, passwd)
return
self.tmppasswd = None
wc.grab_json_response('/api/providers/logout')
self._currcreds = (username, passwd)
@@ -632,3 +641,4 @@ def remote_nodecfg(nodename, cfm):
info = {'addresses': [ipaddr]}
nh = NodeHandler(info, cfm)
nh.config(nodename)

View File

@@ -587,7 +587,10 @@ def get_deployment_profile(node, cfg, cfd=None):
return None
candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None)
if candmgrs:
candmgrs = noderange.NodeRange(candmgrs, cfg).nodes
try:
candmgrs = noderange.NodeRange(candmgrs, cfg).nodes
except Exception: # fallback to unverified noderange
candmgrs = noderange.NodeRange(candmgrs).nodes
if collective.get_myname() not in candmgrs:
return None
return profile
@@ -771,6 +774,14 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile):
node, profile, len(bootfile) - 127)})
return
repview[108:108 + len(bootfile)] = bootfile
elif info['architecture'] == 'uefi-aarch64' and packet.get(77, None) == b'iPXE':
if not profile:
profile = get_deployment_profile(node, cfg)
if not profile:
log.log({'info': 'No pending profile for {0}, skipping proxyDHCP eply'.format(node)})
return
bootfile = 'http://{0}/confluent-public/os/{1}/boot.ipxe'.format(myipn, profile).encode('utf8')
repview[108:108 + len(bootfile)] = bootfile
myip = myipn
myipn = socket.inet_aton(myipn)
orepview[12:16] = myipn
@@ -812,6 +823,13 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile):
repview[replen - 1:replen + 1] = b'\x03\x04'
repview[replen + 1:replen + 5] = gateway
replen += 6
if 82 in packet:
reloptionslen = len(packet[82])
reloptionshdr = struct.pack('BB', 82, reloptionslen)
repview[replen - 1:replen + 1] = reloptionshdr
repview[replen + 1:replen + reloptionslen + 1] = packet[82]
replen += 2 + reloptionslen
repview[replen - 1:replen] = b'\xff' # end of options, should always be last byte
repview = memoryview(reply)
pktlen = struct.pack('!H', replen + 28) # ip+udp = 28

View File

@@ -246,11 +246,11 @@ def _find_srvtype(net, net4, srvtype, addresses, xid):
try:
net4.sendto(data, ('239.255.255.253', 427))
except socket.error as se:
# On occasion, multicasting may be disabled
# tolerate this scenario and move on
if se.errno != 101:
raise
net4.sendto(data, (bcast, 427))
pass
try:
net4.sendto(data, (bcast, 427))
except socket.error as se:
pass
def _grab_rsps(socks, rsps, interval, xidmap, deferrals):

View File

@@ -53,7 +53,7 @@ def execupdate(handler, filename, updateobj, type, owner, node, datfile):
return
if type == 'ffdc' and os.path.isdir(filename):
filename += '/' + node
if 'type' == 'ffdc':
if type == 'ffdc':
errstr = False
if os.path.exists(filename):
errstr = '{0} already exists on {1}, cannot overwrite'.format(

View File

@@ -618,7 +618,6 @@ def resourcehandler(env, start_response):
yield '500 - ' + str(e)
return
def resourcehandler_backend(env, start_response):
"""Function to handle new wsgi requests
"""
@@ -728,7 +727,13 @@ def resourcehandler_backend(env, start_response):
elif (env['PATH_INFO'].endswith('/forward/web') and
env['PATH_INFO'].startswith('/nodes/')):
prefix, _, _ = env['PATH_INFO'].partition('/forward/web')
_, _, nodename = prefix.rpartition('/')
#_, _, nodename = prefix.rpartition('/')
default = False
if 'default' in env['PATH_INFO']:
default = True
_,_,nodename,_ = prefix.split('/')
else:
_, _, nodename = prefix.rpartition('/')
hm = cfgmgr.get_node_attributes(nodename, 'hardwaremanagement.manager')
targip = hm.get(nodename, {}).get(
'hardwaremanagement.manager', {}).get('value', None)
@@ -737,6 +742,29 @@ def resourcehandler_backend(env, start_response):
yield 'No hardwaremanagement.manager defined for node'
return
targip = targip.split('/', 1)[0]
if default:
try:
ip_info = socket.getaddrinfo(targip, 0, 0, socket.SOCK_STREAM)
except socket.gaierror:
start_response('404 Not Found', headers)
yield 'hardwaremanagement.manager definition could not be resolved'
return
# this is just to future proof just in case the indexes of the address family change in future
for i in range(len(ip_info)):
if ip_info[i][0] == socket.AF_INET:
url = 'https://{0}/'.format(ip_info[i][-1][0])
start_response('302', [('Location', url)])
yield 'Our princess is in another castle!'
return
elif ip_info[i][0] == socket.AF_INET6:
url = 'https://[{0}]/'.format(ip_info[i][-1][0])
if url.startswith('https://[fe80'):
start_response('405 Method Not Allowed', headers)
yield 'link local ipv6 address cannot be used in browser'
return
start_response('302', [('Location', url)])
yield 'Our princess is in another castle!'
return
funport = forwarder.get_port(targip, env['HTTP_X_FORWARDED_FOR'],
authorized['sessionid'])
host = env['HTTP_X_FORWARDED_HOST']

View File

@@ -0,0 +1,79 @@
import eventlet
import confluent.messages as msg
import confluent.exceptions as exc
import struct
import eventlet.green.socket as socket
import eventlet.green.subprocess as subprocess
import os
mountsbyuser = {}
_browserfsd = None
def assure_browserfs():
global _browserfsd
if _browserfsd is None:
os.makedirs('/var/run/confluent/browserfs/mount', exist_ok=True)
_browserfsd = subprocess.Popen(
['/opt/confluent/bin/browserfs',
'-c', '/var/run/confluent/browserfs/control',
'-s', '127.0.0.1:4006',
# browserfs supports unix domain websocket, however apache reverse proxy is dicey that way in some versions
'-w', '/var/run/confluent/browserfs/mount'])
while not os.path.exists('/var/run/confluent/browserfs/control'):
eventlet.sleep(0.5)
def handle_request(configmanager, inputdata, pathcomponents, operation):
curruser = configmanager.current_user
if len(pathcomponents) == 0:
mounts = mountsbyuser.get(curruser, [])
if operation == 'retrieve':
for mount in mounts:
yield msg.ChildCollection(mount['index'])
elif operation == 'create':
if 'name' not in inputdata:
raise exc.InvalidArgumentException('Required parameter "name" is missing')
usedidx = set([])
for mount in mounts:
usedidx.add(mount['index'])
curridx = 1
while curridx in usedidx:
curridx += 1
currmount = requestmount(curruser, inputdata['name'])
currmount['index'] = curridx
if curruser not in mountsbyuser:
mountsbyuser[curruser] = []
mountsbyuser[curruser].append(currmount)
yield msg.KeyValueData({
'path': currmount['path'],
'fullpath': '/var/run/confluent/browserfs/mount/{}'.format(currmount['path']),
'authtoken': currmount['authtoken']
})
def requestmount(subdir, filename):
assure_browserfs()
a = socket.socket(socket.AF_UNIX)
a.connect('/var/run/confluent/browserfs/control')
subname = subdir.encode()
a.send(struct.pack('!II', 1, len(subname)))
a.send(subname)
fname = filename.encode()
a.send(struct.pack('!I', len(fname)))
a.send(fname)
rsp = a.recv(4)
retcode = struct.unpack('!I', rsp)[0]
if retcode != 0:
raise Exception("Bad return code")
rsp = a.recv(4)
nlen = struct.unpack('!I', rsp)[0]
idstr = a.recv(nlen).decode('utf8')
rsp = a.recv(4)
nlen = struct.unpack('!I', rsp)[0]
authtok = a.recv(nlen).decode('utf8')
thismount = {
'id': idstr,
'path': '{}/{}/{}'.format(idstr, subdir, filename),
'authtoken': authtok
}
return thismount

View File

@@ -381,9 +381,10 @@ def list_info(parms, requestedparameter):
break
else:
candidate = info[requestedparameter]
candidate = candidate.strip()
if candidate != '':
results.add(_api_sanitize_string(candidate))
if candidate:
candidate = candidate.strip()
if candidate != '':
results.add(_api_sanitize_string(candidate))
return [msg.ChildCollection(x + suffix) for x in util.natural_sort(results)]
def _handle_neighbor_query(pathcomponents, configmanager):

View File

@@ -96,6 +96,7 @@ class Bracketer(object):
txtnums = getnumbers_nodename(nodename)
nums = [int(x) for x in txtnums]
for n in range(self.count):
# First pass to see if we have exactly one different number
padto = len(txtnums[n])
needpad = (padto != len('{}'.format(nums[n])))
if self.sequences[n] is None:
@@ -105,7 +106,24 @@ class Bracketer(object):
elif self.sequences[n][2] == nums[n] and self.numlens[n][1] == padto:
continue # new nodename has no new number, keep going
else: # if self.sequences[n][2] != nums[n] or :
if self.diffn is not None and (n != self.diffn or
if self.diffn is not None and (n != self.diffn or
(padto < self.numlens[n][1]) or
(needpad and padto != self.numlens[n][1])):
self.flush_current()
self.sequences[n] = [[], nums[n], nums[n]]
self.numlens[n] = [padto, padto]
self.diffn = n
for n in range(self.count):
padto = len(txtnums[n])
needpad = (padto != len('{}'.format(nums[n])))
if self.sequences[n] is None:
# We initialize to text pieces, 'currstart', and 'prev' number
self.sequences[n] = [[], nums[n], nums[n]]
self.numlens[n] = [len(txtnums[n]), len(txtnums[n])]
elif self.sequences[n][2] == nums[n] and self.numlens[n][1] == padto:
continue # new nodename has no new number, keep going
else: # if self.sequences[n][2] != nums[n] or :
if self.diffn is not None and (n != self.diffn or
(padto < self.numlens[n][1]) or
(needpad and padto != self.numlens[n][1])):
self.flush_current()
@@ -384,12 +402,16 @@ class NodeRange(object):
def _expandstring(self, element, filternodes=None):
prefix = ''
if element[0][0] in ('/', '~'):
if self.purenumeric:
raise Exception('Regular expression not supported within "[]"')
element = ''.join(element)
nameexpression = element[1:]
if self.cfm is None:
raise Exception('Verification configmanager required')
return set(self.cfm.filter_nodenames(nameexpression, filternodes))
elif '=' in element[0] or '!~' in element[0]:
if self.purenumeric:
raise Exception('Equality/Inequality operators (=, !=, =~, !~) are invalid within "[]"')
element = ''.join(element)
if self.cfm is None:
raise Exception('Verification configmanager required')
@@ -449,3 +471,29 @@ class NodeRange(object):
if self.cfm is None:
return set([element])
raise Exception(element + ' not a recognized node, group, or alias')
if __name__ == '__main__':
cases = [
(['r3u4', 'r5u6'], 'r3u4,r5u6'), # should not erroneously gather
(['r3u4s1', 'r5u6s3'], 'r3u4s1,r5u6s3'), # should not erroneously gather
(['r3u4s1', 'r3u4s2', 'r5u4s3'], 'r3u4s[1:2],r5u4s3'), # should not erroneously gather
(['r3u4', 'r3u5', 'r3u6', 'r3u9', 'r4u1'], 'r3u[4:6,9],r4u1'),
(['n01', 'n2', 'n03'], 'n01,n2,n03'),
(['n7', 'n8', 'n09', 'n10', 'n11', 'n12', 'n13', 'n14', 'n15', 'n16',
'n17', 'n18', 'n19', 'n20'], 'n[7:8],n[09:20]')
]
for case in cases:
gc = case[0]
bracketer = Bracketer(gc[0])
for chnk in gc[1:]:
bracketer.extend(chnk)
br = bracketer.range
resnodes = NodeRange(br).nodes
if set(resnodes) != set(gc):
print('FAILED: ' + repr(sorted(gc)))
print('RESULT: ' + repr(sorted(resnodes)))
print('EXPECTED: ' + repr(case[1]))
print('ACTUAL: ' + br)

View File

@@ -411,9 +411,7 @@ def check_ubuntu(isoinfo):
]
return {'name': 'ubuntu-{0}-{1}'.format(ver, arch),
'method': EXTRACT|COPY,
'extractlist': ['casper/vmlinuz', 'casper/initrd',
'efi/boot/bootx64.efi', 'efi/boot/grubx64.efi'
],
'extractlist': exlist,
'copyto': 'install.iso',
'category': 'ubuntu{0}'.format(major)}
@@ -601,7 +599,7 @@ def fingerprint(archive):
return imginfo, None, None
def import_image(filename, callback, backend=False, mfd=None):
def import_image(filename, callback, backend=False, mfd=None, custtargpath=None, custdistpath=None, custname=''):
if mfd:
archive = os.fdopen(int(mfd), 'rb')
else:
@@ -610,11 +608,16 @@ def import_image(filename, callback, backend=False, mfd=None):
if not identity:
return -1
identity, imginfo, funname = identity
targpath = identity['name']
distpath = '/var/lib/confluent/distributions/' + targpath
if identity.get('subname', None):
targpath += '/' + identity['subname']
targpath = '/var/lib/confluent/distributions/' + targpath
distpath = custdistpath
if not distpath:
targpath = identity['name']
distpath = '/var/lib/confluent/distributions/' + targpath
if not custtargpath:
if identity.get('subname', None):
targpath += '/' + identity['subname']
targpath = '/var/lib/confluent/distributions/' + targpath
else:
targpath = custtargpath
try:
os.makedirs(targpath, 0o755)
except Exception as e:
@@ -747,9 +750,9 @@ def rebase_profile(dirname):
# customization detected, skip
# else
# update required, manifest update
def get_hashes(dirname):
hashmap = {}
for dname, _, fnames in os.walk(dirname):
@@ -765,18 +768,21 @@ def get_hashes(dirname):
def generate_stock_profiles(defprofile, distpath, targpath, osname,
profilelist):
profilelist, customname):
osd, osversion, arch = osname.split('-')
bootupdates = []
for prof in os.listdir('{0}/profiles'.format(defprofile)):
srcname = '{0}/profiles/{1}'.format(defprofile, prof)
profname = '{0}-{1}'.format(osname, prof)
if customname:
profname = '{0}-{1}'.format(customname, prof)
else:
profname = '{0}-{1}'.format(osname, prof)
dirname = '/var/lib/confluent/public/os/{0}'.format(profname)
if os.path.exists(dirname):
continue
oumask = os.umask(0o22)
shutil.copytree(srcname, dirname)
hmap = get_hashes(dirname)
hmap = get_hashes(dirname)
profdata = None
try:
os.makedirs('{0}/boot/initramfs'.format(dirname), 0o755)
@@ -824,11 +830,12 @@ def generate_stock_profiles(defprofile, distpath, targpath, osname,
class MediaImporter(object):
def __init__(self, media, cfm=None):
def __init__(self, media, cfm=None, customname=None, checkonly=False):
self.worker = None
if not os.path.exists('/var/lib/confluent/public'):
raise Exception('`osdeploy initialize` must be executed before importing any media')
self.profiles = []
self.errors = []
medfile = None
self.medfile = None
if cfm and media in cfm.clientfiles:
@@ -848,25 +855,35 @@ class MediaImporter(object):
self.phase = 'copying'
if not identity:
raise Exception('Unrecognized OS Media')
if 'subname' in identity:
self.customname = customname if customname else ''
if customname:
importkey = customname
elif 'subname' in identity:
importkey = '{0}-{1}'.format(identity['name'], identity['subname'])
else:
importkey = identity['name']
if importkey in importing:
if importkey in importing and not checkonly:
raise Exception('Media import already in progress for this media')
self.importkey = importkey
importing[importkey] = self
self.importkey = importkey
self.osname = identity['name']
self.oscategory = identity.get('category', None)
targpath = identity['name']
if customname:
targpath = customname
else:
targpath = identity['name']
self.distpath = '/var/lib/confluent/distributions/' + targpath
if identity.get('subname', None):
if identity.get('subname', None): # subname is to indicate disk number in a media set
targpath += '/' + identity['subname']
self.targpath = '/var/lib/confluent/distributions/' + targpath
if os.path.exists(self.targpath):
del importing[importkey]
raise Exception('{0} already exists'.format(self.targpath))
errstr = '{0} already exists'.format(self.targpath)
if checkonly:
self.errors = [errstr]
else:
raise Exception(errstr)
if checkonly:
return
importing[importkey] = self
self.filename = os.path.abspath(media)
self.error = ''
self.importer = eventlet.spawn(self.importmedia)
@@ -884,7 +901,7 @@ class MediaImporter(object):
os.environ['CONFLUENT_MEDIAFD'] = '{0}'.format(self.medfile.fileno())
with open(os.devnull, 'w') as devnull:
self.worker = subprocess.Popen(
[sys.executable, __file__, self.filename, '-b'],
[sys.executable, __file__, self.filename, '-b', self.targpath, self.distpath, self.customname],
stdin=devnull, stdout=subprocess.PIPE, close_fds=False)
wkr = self.worker
currline = b''
@@ -924,7 +941,7 @@ class MediaImporter(object):
self.oscategory)
try:
generate_stock_profiles(defprofile, self.distpath, self.targpath,
self.osname, self.profiles)
self.osname, self.profiles, self.customname)
except Exception as e:
self.phase = 'error'
self.error = str(e)
@@ -951,7 +968,7 @@ if __name__ == '__main__':
os.umask(0o022)
if len(sys.argv) > 2:
mfd = os.environ.get('CONFLUENT_MEDIAFD', None)
sys.exit(import_image(sys.argv[1], callback=printit, backend=True, mfd=mfd))
sys.exit(import_image(sys.argv[1], callback=printit, backend=True, mfd=mfd, custtargpath=sys.argv[3], custdistpath=sys.argv[4], custname=sys.argv[5]))
else:
sys.exit(import_image(sys.argv[1], callback=printit))

View File

@@ -21,16 +21,16 @@ import confluent.util as util
from fnmatch import fnmatch
def retrieve(nodes, element, configmanager, inputdata):
def retrieve(nodes, element, configmanager, inputdata, clearwarnbynode=None):
configmanager.check_quorum()
if nodes is not None:
return retrieve_nodes(nodes, element, configmanager, inputdata)
return retrieve_nodes(nodes, element, configmanager, inputdata, clearwarnbynode)
elif element[0] == 'nodegroups':
return retrieve_nodegroup(
element[1], element[3], configmanager, inputdata)
element[1], element[3], configmanager, inputdata, clearwarnbynode)
def retrieve_nodegroup(nodegroup, element, configmanager, inputdata):
def retrieve_nodegroup(nodegroup, element, configmanager, inputdata, clearwarnbynode=None):
try:
grpcfg = configmanager.get_nodegroup_attributes(nodegroup)
except KeyError:
@@ -106,10 +106,12 @@ def retrieve_nodegroup(nodegroup, element, configmanager, inputdata):
raise Exception("BUGGY ATTRIBUTE FOR NODEGROUP")
def retrieve_nodes(nodes, element, configmanager, inputdata):
def retrieve_nodes(nodes, element, configmanager, inputdata, clearwarnbynode):
attributes = configmanager.get_node_attributes(nodes)
if element[-1] == 'all':
for node in util.natural_sort(nodes):
if clearwarnbynode and node in clearwarnbynode:
yield msg.Attributes(node, {'_warnings': clearwarnbynode[node]})
theattrs = set(allattributes.node).union(set(attributes[node]))
for attribute in sorted(theattrs):
if attribute in attributes[node]: # have a setting for it
@@ -266,6 +268,7 @@ def update_nodes(nodes, element, configmanager, inputdata):
namemap[node] = rename['rename']
configmanager.rename_nodes(namemap)
return yield_rename_resources(namemap, isnode=True)
clearwarnbynode = {}
for node in nodes:
updatenode = inputdata.get_attributes(node, allattributes.node)
clearattribs = []
@@ -299,10 +302,11 @@ def update_nodes(nodes, element, configmanager, inputdata):
markup = (e.text[:e.offset-1] + '-->' + e.text[e.offset-1] + '<--' + e.text[e.offset:]).strip()
raise exc.InvalidArgumentException('Syntax error in attribute name: "{0}"'.format(markup))
if len(clearattribs) > 0:
configmanager.clear_node_attributes([node], clearattribs)
clearwarnbynode[node] = []
configmanager.clear_node_attributes([node], clearattribs, warnings=clearwarnbynode[node])
updatedict[node] = updatenode
try:
configmanager.set_node_attributes(updatedict)
except ValueError as e:
raise exc.InvalidArgumentException(str(e))
return retrieve(nodes, element, configmanager, inputdata)
return retrieve(nodes, element, configmanager, inputdata, clearwarnbynode)

View File

@@ -15,10 +15,31 @@ import confluent.core as core
import confluent.messages as msg
import pyghmi.exceptions as pygexc
import confluent.exceptions as exc
import eventlet.queue as queue
import eventlet.greenpool as greenpool
def reseat_bays(encmgr, bays, configmanager, rspq):
try:
for encbay in bays:
node = bays[encbay]
try:
for rsp in core.handle_path(
'/nodes/{0}/_enclosure/reseat_bay'.format(encmgr),
'update', configmanager,
inputdata={'reseat': int(encbay)}):
rspq.put(rsp)
except pygexc.UnsupportedFunctionality as uf:
rspq.put(msg.ConfluentNodeError(node, str(uf)))
except exc.TargetEndpointUnreachable as uf:
rspq.put(msg.ConfluentNodeError(node, str(uf)))
finally:
rspq.put(None)
def update(nodes, element, configmanager, inputdata):
emebs = configmanager.get_node_attributes(
nodes, (u'enclosure.manager', u'enclosure.bay'))
baysbyencmgr = {}
for node in nodes:
try:
em = emebs[node]['enclosure.manager']['value']
@@ -30,13 +51,20 @@ def update(nodes, element, configmanager, inputdata):
em = node
if not eb:
eb = -1
try:
for rsp in core.handle_path(
'/nodes/{0}/_enclosure/reseat_bay'.format(em),
'update', configmanager,
inputdata={'reseat': int(eb)}):
yield rsp
except pygexc.UnsupportedFunctionality as uf:
yield msg.ConfluentNodeError(node, str(uf))
except exc.TargetEndpointUnreachable as uf:
yield msg.ConfluentNodeError(node, str(uf))
if em not in baysbyencmgr:
baysbyencmgr[em] = {}
baysbyencmgr[em][eb] = node
rspq = queue.Queue()
gp = greenpool.GreenPool(64)
for encmgr in baysbyencmgr:
gp.spawn_n(reseat_bays, encmgr, baysbyencmgr[encmgr], configmanager, rspq)
while gp.running():
nrsp = rspq.get()
if nrsp is not None:
yield nrsp
while not rspq.empty():
nrsp = rspq.get()
if nrsp is not None:
yield nrsp

View File

@@ -15,10 +15,16 @@ import confluent.core as core
import confluent.messages as msg
import pyghmi.exceptions as pygexc
import confluent.exceptions as exc
import eventlet.greenpool as greenpool
import eventlet.queue as queue
class TaskDone:
pass
def retrieve(nodes, element, configmanager, inputdata):
emebs = configmanager.get_node_attributes(
nodes, (u'power.*pdu', u'power.*outlet'))
relpdus = {}
if element == ['power', 'inlets']:
outletnames = set([])
for node in nodes:
@@ -39,13 +45,36 @@ def retrieve(nodes, element, configmanager, inputdata):
for pgroup in outlets[node]:
pdu = outlets[node][pgroup]['pdu']
outlet = outlets[node][pgroup]['outlet']
try:
for rsp in core.handle_path(
'/nodes/{0}/power/outlets/{1}'.format(pdu, outlet),
'retrieve', configmanager):
yield msg.KeyValueData({pgroup: rsp.kvpairs['state']['value']}, node)
except exc.TargetEndpointBadCredentials:
yield msg.ConfluentTargetInvalidCredentials(pdu)
if pdu not in relpdus:
relpdus[pdu] = {}
relpdus[pdu][outlet] = (node, pgroup)
rspq = queue.Queue()
gp = greenpool.GreenPool(64)
for pdu in relpdus:
gp.spawn(readpdu, pdu, relpdus[pdu], configmanager, rspq)
while gp.running():
nrsp = rspq.get()
if not isinstance(nrsp, TaskDone):
yield nrsp
while not rspq.empty():
nrsp = rspq.get()
if not isinstance(nrsp, TaskDone):
yield nrsp
def readpdu(pdu, outletmap, configmanager, rspq):
try:
for outlet in outletmap:
node, pgroup = outletmap[outlet]
try:
for rsp in core.handle_path(
'/nodes/{0}/power/outlets/{1}'.format(pdu, outlet),
'retrieve', configmanager):
rspq.put(msg.KeyValueData({pgroup: rsp.kvpairs['state']['value']}, node))
except exc.TargetEndpointBadCredentials:
rspq.put(msg.ConfluentTargetInvalidCredentials(pdu))
finally: # ensure thhat at least one thing triggers the get
rspq.put(TaskDone())
def get_outlets(nodes, emebs, inletname):
outlets = {}
@@ -72,11 +101,34 @@ def update(nodes, element, configmanager, inputdata):
emebs = configmanager.get_node_attributes(
nodes, (u'power.*pdu', u'power.*outlet'))
inletname = element[-1]
relpdus = {}
rspq = queue.Queue()
gp = greenpool.GreenPool(64)
outlets = get_outlets(nodes, emebs, inletname)
for node in outlets:
for pgroup in outlets[node]:
pdu = outlets[node][pgroup]['pdu']
outlet = outlets[node][pgroup]['outlet']
if pdu not in relpdus:
relpdus[pdu] = {}
relpdus[pdu][outlet] = (node, pgroup)
for pdu in relpdus:
gp.spawn(updatepdu, pdu, relpdus[pdu], configmanager, inputdata, rspq)
while gp.running():
nrsp = rspq.get()
if not isinstance(nrsp, TaskDone):
yield nrsp
while not rspq.empty():
nrsp = rspq.get()
if not isinstance(nrsp, TaskDone):
yield nrsp
def updatepdu(pdu, outletmap, configmanager, inputdata, rspq):
try:
for outlet in outletmap:
node, pgroup = outletmap[outlet]
for rsp in core.handle_path('/nodes/{0}/power/outlets/{1}'.format(pdu, outlet),
'update', configmanager, inputdata={'state': inputdata.powerstate(node)}):
yield msg.KeyValueData({pgroup: rsp.kvpairs['state']['value']}, node)
rspq.put(msg.KeyValueData({pgroup: rsp.kvpairs['state']['value']}, node))
finally:
rspq.put(TaskDone())

View File

@@ -93,8 +93,14 @@ def retrieve(nodes, element, configmanager, inputdata):
'/noderange/{0}/description'.format(needheight),
'retrieve', configmanager,
inputdata=None):
if not hasattr(rsp, 'kvpairs'):
results['errors'].append((rsp.node, rsp.error))
continue
kvp = rsp.kvpairs
for node in kvp:
allnodedata[node]['height'] = kvp[node]['height']
for node in allnodedata:
if 'height' not in allnodedata[node]:
allnodedata[node]['height'] = 1
yield msg.Generic(results)

View File

@@ -32,6 +32,7 @@ anspypath = None
running_status = {}
class PlayRunner(object):
def __init__(self, playfiles, nodes):
self.stderr = ''
self.playfiles = playfiles
self.nodes = nodes
self.worker = None
@@ -63,6 +64,9 @@ class PlayRunner(object):
else:
textout += result['state'] + '\n'
textout += '\n'
if self.stderr:
textout += "ERRORS **********************************\n"
textout += self.stderr
return textout
def dump_json(self):
@@ -93,7 +97,8 @@ class PlayRunner(object):
[mypath, __file__, targnodes, playfilename],
stdin=devnull, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, self.stderr = worker.communicate()
stdout, stder = worker.communicate()
self.stderr += stder.decode('utf8')
current = memoryview(stdout)
while len(current):
sz = struct.unpack('=q', current[:8])[0]

View File

@@ -30,7 +30,7 @@ import eventlet
webclient = eventlet.import_patched('pyghmi.util.webclient')
currtz = None
currtz = 'UTC'
keymap = 'us'
currlocale = 'en_US.UTF-8'
currtzvintage = None
@@ -517,8 +517,8 @@ def handle_request(env, start_response):
pals = get_extra_names(nodename, cfg, myip)
result = syncfiles.start_syncfiles(
nodename, cfg, json.loads(reqbody), pals)
start_response(result, ())
yield ''
start_response(result[0], ())
yield result[1]
return
if 'GET' == operation:
status, output = syncfiles.get_syncresult(nodename)

View File

@@ -98,14 +98,15 @@ def initialize_ca():
preexec_fn=normalize_uid)
ouid = normalize_uid()
try:
os.makedirs('/var/lib/confluent/public/site/ssh/', mode=0o755)
except OSError as e:
if e.errno != 17:
raise
try:
os.makedirs('/var/lib/confluent/public/site/ssh/', mode=0o755)
except OSError as e:
if e.errno != 17:
raise
cafilename = '/var/lib/confluent/public/site/ssh/{0}.ca'.format(myname)
shutil.copy('/etc/confluent/ssh/ca.pub', cafilename)
finally:
os.seteuid(ouid)
cafilename = '/var/lib/confluent/public/site/ssh/{0}.ca'.format(myname)
shutil.copy('/etc/confluent/ssh/ca.pub', cafilename)
# newent = '@cert-authority * ' + capub.read()
@@ -185,6 +186,14 @@ def initialize_root_key(generate, automation=False):
if os.path.exists('/etc/confluent/ssh/automation'):
alreadyexist = True
else:
ouid = normalize_uid()
try:
os.makedirs('/etc/confluent/ssh', mode=0o700)
except OSError as e:
if e.errno != 17:
raise
finally:
os.seteuid(ouid)
subprocess.check_call(
['ssh-keygen', '-t', 'ed25519',
'-f','/etc/confluent/ssh/automation', '-N', get_passphrase(),

View File

@@ -24,6 +24,7 @@ import confluent.noderange as noderange
import eventlet
import pwd
import grp
import sys
def mkdirp(path):
try:
@@ -193,8 +194,8 @@ def sync_list_to_node(sl, node, suffixes, peerip=None):
targip = node
if peerip:
targip = peerip
output = util.run(
['rsync', '-rvLD', targdir + '/', 'root@[{}]:/'.format(targip)])[0]
output, stderr = util.run(
['rsync', '-rvLD', targdir + '/', 'root@[{}]:/'.format(targip)])
except Exception as e:
if 'CalledProcessError' not in repr(e):
# https://github.com/eventlet/eventlet/issues/413
@@ -212,8 +213,11 @@ def sync_list_to_node(sl, node, suffixes, peerip=None):
unreadablefiles.append(filename.replace(targdir, ''))
if unreadablefiles:
raise Exception("Syncing failed due to unreadable files: " + ','.join(unreadablefiles))
elif b'Permission denied, please try again.' in e.stderr:
elif hasattr(e, 'stderr') and e.stderr and b'Permission denied, please try again.' in e.stderr:
raise Exception('Syncing failed due to authentication error, is the confluent automation key not set up (osdeploy initialize -a) or is there some process replacing authorized_keys on the host?')
elif hasattr(e, 'stderr') and e.stderr:
sys.stderr.write(e.stderr.decode('utf8'))
raise
else:
raise
finally:
@@ -231,7 +235,7 @@ def stage_ent(currmap, ent, targdir, appendexist=False):
everyfent = []
allfents = ent.split()
for tmpent in allfents:
fents = glob.glob(tmpent)
fents = glob.glob(tmpent) # TODO: recursive globbing?
if not fents:
raise Exception('No matching files for "{}"'.format(tmpent))
everyfent.extend(fents)
@@ -281,9 +285,10 @@ def mkpathorlink(source, destination, appendexist=False):
syncrunners = {}
cleaner = None
def start_syncfiles(nodename, cfg, suffixes, principals=[]):
global cleaner
peerip = None
if 'myips' in suffixes:
targips = suffixes['myips']
@@ -307,13 +312,41 @@ def start_syncfiles(nodename, cfg, suffixes, principals=[]):
raise Exception('Cannot perform syncfiles without profile assigned')
synclist = '/var/lib/confluent/public/os/{}/syncfiles'.format(profile)
if not os.path.exists(synclist):
return '200 OK' # not running
return '200 OK', 'No synclist' # not running
sl = SyncList(synclist, nodename, cfg)
if not (sl.appendmap or sl.mergemap or sl.replacemap or sl.appendoncemap):
return '200 OK' # the synclist has no actual entries
return '200 OK', 'Empty synclist' # the synclist has no actual entries
if nodename in syncrunners:
if syncrunners[nodename].dead:
syncrunners[nodename].wait()
else:
return '503 Synchronization already in progress', 'Synchronization already in progress for {}'.format(nodename)
syncrunners[nodename] = eventlet.spawn(
sync_list_to_node, sl, nodename, suffixes, peerip)
return '202 Queued' # backgrounded
if not cleaner:
cleaner = eventlet.spawn(cleanit)
return '202 Queued', 'Background synchronization initiated' # backgrounded
def cleanit():
toreap = {}
while True:
for nn in list(syncrunners):
if syncrunners[nn].dead:
if nn in toreap:
try:
syncrunners[nn].wait()
except Exception as e:
print(repr(e))
pass
del syncrunners[nn]
del toreap[nn]
else:
toreap[nn] = 1
elif nn in toreap:
del toreap[nn]
eventlet.sleep(30)
def get_syncresult(nodename):
if nodename not in syncrunners:

View File

@@ -29,9 +29,9 @@ import struct
import eventlet.green.subprocess as subprocess
def mkdirp(path):
def mkdirp(path, mode=0o777):
try:
os.makedirs(path)
os.makedirs(path, mode)
except OSError as e:
if e.errno != 17:
raise
@@ -168,7 +168,7 @@ def cert_matches(fingerprint, certificate):
return algo(certificate).digest() == fingerprint
algo, _, fp = fingerprint.partition('$')
newfp = None
if algo in ('sha512', 'sha256'):
if algo in ('sha512', 'sha256', 'sha384'):
newfp = get_fingerprint(certificate, algo)
return newfp and fingerprint == newfp

View File

@@ -17,12 +17,12 @@ Requires: confluent_vtbufferd
Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic
%else
%if "%{dist}" == ".el8"
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-monotonic, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
%else
%if "%{dist}" == ".el9"
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
%else
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodome >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dnspython, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-PyYAML openssl iproute
Requires: python3-dbm,python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodome >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dnspython, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-PyYAML openssl iproute
%endif
%endif
%endif

View File

@@ -19,6 +19,7 @@ setup(
'confluent/plugins/hardwaremanagement/',
'confluent/plugins/deployment/',
'confluent/plugins/console/',
'confluent/plugins/info/',
'confluent/plugins/shell/',
'confluent/collective/',
'confluent/plugins/configuration/'],

View File

@@ -16,7 +16,7 @@ Restart=on-failure
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_CHOWN CAP_NET_RAW
User=confluent
Group=confluent
DevicePolicy=closed
#DevicePolicy=closed # fuse filesystem requires us to interact with /dev/fuse
ProtectControlGroups=true
ProtectSystem=true

View File

@@ -22,3 +22,16 @@ modification, are permitted provided that the following conditions are met:
* Neither the name of the copyright holder nor the
names of contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS,
* COPYRIGHT HOLDERS, OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,8 +1,14 @@
#include <asm-generic/socket.h>
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <locale.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <sys/un.h>
#include <fcntl.h>
#include "tmt.h"
#define HASHSIZE 2053
#define MAXNAMELEN 256
@@ -10,13 +16,17 @@
struct terment {
struct terment *next;
char *name;
int fd;
TMT *vt;
};
#define SETNODE 1
#define WRITE 2
#define READBUFF 0
#define CLOSECONN 3
#define MAXEVTS 16
static struct terment *buffers[HASHSIZE];
static char* nodenames[HASHSIZE];
unsigned long hash(char *str)
/* djb2a */
@@ -37,10 +47,13 @@ TMT *get_termentbyname(char *name) {
return NULL;
}
TMT *set_termentbyname(char *name) {
TMT *set_termentbyname(char *name, int fd) {
struct terment *ret;
int idx;
if (nodenames[fd] == NULL) {
nodenames[fd] = strdup(name);
}
idx = hash(name);
for (ret = buffers[idx]; ret != NULL; ret = ret->next)
if (strcmp(name, ret->name) == 0)
@@ -48,12 +61,13 @@ TMT *set_termentbyname(char *name) {
ret = (struct terment *)malloc(sizeof(*ret));
ret->next = buffers[idx];
ret->name = strdup(name);
ret->fd = fd;
ret->vt = tmt_open(31, 100, NULL, NULL, L"→←↑↓■◆▒°±▒┘┐┌└┼⎺───⎽├┤┴┬│≤≥π≠£•");
buffers[idx] = ret;
return ret->vt;
}
void dump_vt(TMT* outvt) {
void dump_vt(TMT* outvt, int outfd) {
const TMTSCREEN *out = tmt_screen(outvt);
const TMTPOINT *curs = tmt_cursor(outvt);
int line, idx, maxcol, maxrow;
@@ -67,9 +81,10 @@ void dump_vt(TMT* outvt) {
tmt_color_t fg = TMT_COLOR_DEFAULT;
tmt_color_t bg = TMT_COLOR_DEFAULT;
wchar_t sgrline[30];
char strbuffer[128];
size_t srgidx = 0;
char colorcode = 0;
wprintf(L"\033c");
write(outfd, "\033c", 2);
maxcol = 0;
maxrow = 0;
for (line = out->nline - 1; line >= 0; --line) {
@@ -148,60 +163,136 @@ void dump_vt(TMT* outvt) {
}
if (sgrline[0] != 0) {
sgrline[wcslen(sgrline) - 1] = 0; // Trim last ;
wprintf(L"\033[%lsm", sgrline);
snprintf(strbuffer, sizeof(strbuffer), "\033[%lsm", sgrline);
write(outfd, strbuffer, strlen(strbuffer));
write(outfd, "\033[]", 3);
}
wprintf(L"%lc", out->lines[line]->chars[idx].c);
snprintf(strbuffer, sizeof(strbuffer), "%lc", out->lines[line]->chars[idx].c);
write(outfd, strbuffer, strlen(strbuffer));
}
if (line < maxrow)
wprintf(L"\r\n");
write(outfd, "\r\n", 2);
}
fflush(stdout);
wprintf(L"\x1b[%ld;%ldH", curs->r + 1, curs->c + 1);
fflush(stdout);
//fflush(stdout);
snprintf(strbuffer, sizeof(strbuffer), "\x1b[%ld;%ldH", curs->r + 1, curs->c + 1);
write(outfd, strbuffer, strlen(strbuffer));
//fflush(stdout);
}
/* Service one request from a connected client.
 *
 * Wire format: a 4-byte header word whose top 3 bits are the command
 * (SETNODE/WRITE/READBUFF/CLOSECONN) and whose low 29 bits are the payload
 * length, followed by the payload bytes.
 *
 * fd: connected unix-domain socket; also indexes nodenames[] to remember
 *     which node this connection last selected via SETNODE.
 * Returns 1 to keep the connection in the poll set, 0 to close it.
 */
int handle_traffic(int fd) {
    int cmd, length;
    char currnode[MAXNAMELEN];
    char cmdbuf[MAXDATALEN];
    char *nodename;
    TMT *currvt = NULL;
    TMT *outvt = NULL;
    length = read(fd, &cmd, 4);
    if (length < 4) {
        /* EOF, error, or truncated header: drop the connection rather
         * than interpret garbage bits as a command. */
        return 0;
    }
    length = cmd & 536870911;  /* low 29 bits: payload length */
    cmd = cmd >> 29;           /* high 3 bits: command code */
    if (cmd == SETNODE) {
        /* Bound the payload to the name buffer, leaving room for the
         * NUL terminator; an oversized length would smash the stack. */
        if (length >= MAXNAMELEN)
            return 0;
        cmd = read(fd, currnode, length);
        if (cmd < 0)
            return 0;
        currnode[cmd] = 0;  /* terminate at the bytes actually read */
        currvt = set_termentbyname(currnode, fd);
    } else if (cmd == WRITE) {
        if (length >= MAXDATALEN)
            return 0;
        /* currvt is always NULL on entry (one command per call), so the
         * target terminal is recovered from the name remembered for this
         * fd by a prior SETNODE. */
        if (currvt == NULL) {
            nodename = nodenames[fd];
            if (nodename == NULL)
                return 0;  /* WRITE before any SETNODE: nothing to target */
            currvt = set_termentbyname(nodename, fd);
        }
        cmd = read(fd, cmdbuf, length);
        if (cmd < 0)
            return 0;
        cmdbuf[cmd] = 0;
        /* Feed only the bytes actually received into the emulator. */
        tmt_write(currvt, cmdbuf, cmd);
    } else if (cmd == READBUFF) {
        if (length >= MAXDATALEN)
            return 0;
        cmd = read(fd, cmdbuf, length);
        if (cmd < 0)
            return 0;
        cmdbuf[cmd] = 0;  /* payload is a node name to look up */
        outvt = get_termentbyname(cmdbuf);
        if (outvt != NULL)
            dump_vt(outvt, fd);
        /* Trailing NUL marks end-of-dump for the reader. */
        length = write(fd, "\x00", 1);
        if (length < 0)
            return 0;
    } else if (cmd == CLOSECONN) {
        return 0;
    }
    return 1;
}
int main(int argc, char* argv[]) {
int cmd, length;
setlocale(LC_ALL, "");
char cmdbuf[MAXDATALEN];
char currnode[MAXNAMELEN];
TMT *currvt = NULL;
TMT *outvt = NULL;
struct sockaddr_un addr;
int numevts;
int status;
int poller;
int n;
socklen_t len;
int ctlsock, currsock;
socklen_t addrlen;
struct ucred ucr;
struct epoll_event epvt, evts[MAXEVTS];
stdin = freopen(NULL, "rb", stdin);
if (stdin == NULL) {
exit(1);
}
memset(&addr, 0, sizeof(struct sockaddr_un));
addr.sun_family = AF_UNIX;
strncpy(addr.sun_path + 1, argv[1], sizeof(addr.sun_path) - 2); // abstract namespace socket
ctlsock = socket(AF_UNIX, SOCK_STREAM, 0);
status = bind(ctlsock, (const struct sockaddr*)&addr, sizeof(sa_family_t) + strlen(argv[1]) + 1); //sizeof(struct sockaddr_un));
if (status < 0) {
perror("Unable to open unix socket - ");
exit(1);
}
listen(ctlsock, 128);
poller = epoll_create(1);
memset(&epvt, 0, sizeof(struct epoll_event));
epvt.events = EPOLLIN;
epvt.data.fd = ctlsock;
if (epoll_ctl(poller, EPOLL_CTL_ADD, ctlsock, &epvt) < 0) {
perror("Unable to poll the socket");
exit(1);
}
// create a unix domain socket for accepting, each connection is only allowed to either read or write, not both
while (1) {
length = fread(&cmd, 4, 1, stdin);
if (length < 0)
continue;
length = cmd & 536870911;
cmd = cmd >> 29;
if (cmd == SETNODE) {
cmd = fread(currnode, 1, length, stdin);
currnode[length] = 0;
if (cmd < 0)
continue;
currvt = set_termentbyname(currnode);
} else if (cmd == WRITE) {
if (currvt == NULL)
currvt = set_termentbyname("");
cmd = fread(cmdbuf, 1, length, stdin);
cmdbuf[length] = 0;
if (cmd < 0)
continue;
tmt_write(currvt, cmdbuf, length);
} else if (cmd == READBUFF) {
cmd = fread(cmdbuf, 1, length, stdin);
cmdbuf[length] = 0;
if (cmd < 0)
continue;
outvt = get_termentbyname(cmdbuf);
if (outvt != NULL)
dump_vt(outvt);
length = write(1, "\x00", 1);
if (length < 0)
continue;
numevts = epoll_wait(poller, evts, MAXEVTS, -1);
if (numevts < 0) {
perror("Failed wait");
exit(1);
}
for (n = 0; n < numevts; ++n) {
if (evts[n].data.fd == ctlsock) {
currsock = accept(ctlsock, (struct sockaddr *) &addr, &addrlen);
len = sizeof(ucr);
getsockopt(currsock, SOL_SOCKET, SO_PEERCRED, &ucr, &len);
if (ucr.uid != getuid()) { // block access for other users
close(currsock);
continue;
}
memset(&epvt, 0, sizeof(struct epoll_event));
epvt.events = EPOLLIN;
epvt.data.fd = currsock;
epoll_ctl(poller, EPOLL_CTL_ADD, currsock, &epvt);
} else {
if (!handle_traffic(evts[n].data.fd)) {
epoll_ctl(poller, EPOLL_CTL_DEL, evts[n].data.fd, NULL);
close(evts[n].data.fd);
if (nodenames[evts[n].data.fd] != NULL) {
free(nodenames[evts[n].data.fd]);
nodenames[evts[n].data.fd] = NULL;
}
}
}
}
}
}

View File

@@ -29,7 +29,7 @@ for lic in $(cat /tmp/tmpliclist); do
fname=$(basename $lo)
dlo=$(dirname $lo)
if [[ "$dlo" == *"-lib"* ]]; then
dlo=${dlo/-*}
dlo=${dlo/-lib*}
elif [[ "$dlo" == "device-mapper-"* ]]; then
dlo=${dlo/-*}-mapper
elif [[ "$dlo" == "bind-"* ]]; then
@@ -44,6 +44,10 @@ for lic in $(cat /tmp/tmpliclist); do
cp $lic licenses/$dlo/$fname
lo=$dlo/$fname
echo %license /opt/confluent/genesis/%{arch}/licenses/$lo >> confluent-genesis-out.spec
if [ "$fname" == README ] && [ "$dlo" == "zlib" ]; then
cp $lic licenses/nss/$fname
echo %license /opt/confluent/genesis/%{arch}/licenses/nss/$fname >> confluent-genesis-out.spec
fi
done
mkdir -p licenses/ipmitool
cp /usr/share/doc/ipmitool/COPYING licenses/ipmitool

Some files were not shown because too many files have changed in this diff Show More