From 6833cd9c5301ebfca28c3c68ee2f912b2ee0d643 Mon Sep 17 00:00:00 2001 From: Markus Hilger Date: Fri, 9 Aug 2024 17:58:48 +0200 Subject: [PATCH 1/8] Add VLAN/PKEY support to confignet Introduce new node attribute net.vlan_id to support VLAN/PKEY configuration using confignet. --- .../common/profile/scripts/confignet | 27 ++++++++++++++----- .../confluent/config/attributes.py | 8 ++++-- confluent_server/confluent/netutil.py | 3 +++ 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/confluent_osdeploy/common/profile/scripts/confignet b/confluent_osdeploy/common/profile/scripts/confignet index cb5684a8..7b0eddf9 100644 --- a/confluent_osdeploy/common/profile/scripts/confignet +++ b/confluent_osdeploy/common/profile/scripts/confignet @@ -405,20 +405,35 @@ class NetworkManager(object): else: cname = stgs.get('connection_name', None) iname = list(cfg['interfaces'])[0] - if not cname: - cname = iname + ctype = self.devtypes[iname] + if stgs.get('vlan_id', None): + vlan = stgs['vlan_id'] + if ctype == 'infiniband': + vlan = '0x{0}'.format(vlan) if not vlan.startswith('0x') else vlan + cmdargs['infiniband.parent'] = iname + cmdargs['infiniband.p-key'] = vlan + iname = '{0}.{1}'.format(iname, vlan[2:]) + cname = iname if not cname else cname + elif ctype == 'ethernet': + ctype = 'vlan' + cmdargs['vlan.parent'] = iname + cmdargs['vlan.id'] = vlan + iname = '{0}.{1}'.format(iname, vlan) + cname = iname if not cname else cname + else: + sys.stderr.write("Warning, unknown interface_name ({0}) device type ({1}) for VLAN/PKEY, skipping setup\n".format(iname, ctype)) + return + cname = iname if not cname else cname u = self.uuidbyname.get(cname, None) cargs = [] for arg in cmdargs: cargs.append(arg) cargs.append(cmdargs[arg]) if u: - cargs.append('connection.interface-name') - cargs.append(iname) - subprocess.check_call(['nmcli', 'c', 'm', u] + cargs) + subprocess.check_call(['nmcli', 'c', 'm', u, 'connection.interface-name', iname] + cargs) 
subprocess.check_call(['nmcli', 'c', 'u', u]) else: - subprocess.check_call(['nmcli', 'c', 'add', 'type', self.devtypes[iname], 'con-name', cname, 'connection.interface-name', iname] + cargs) + subprocess.check_call(['nmcli', 'c', 'add', 'type', ctype, 'con-name', cname, 'connection.interface-name', iname] + cargs) self.read_connections() u = self.uuidbyname.get(cname, None) if u: diff --git a/confluent_server/confluent/config/attributes.py b/confluent_server/confluent/config/attributes.py index 101ee03d..f926c962 100644 --- a/confluent_server/confluent/config/attributes.py +++ b/confluent_server/confluent/config/attributes.py @@ -469,9 +469,13 @@ node = { 'net.interface_names': { 'description': 'Interface name or comma delimited list of names to match for this interface. It is generally recommended ' 'to leave this blank unless needing to set up interfaces that are not on a common subnet with a confluent server, ' - 'as confluent servers provide autodetection for matching the correct network definition to an interface.' + 'as confluent servers provide autodetection for matching the correct network definition to an interface. ' 'This would be the default name per the deployed OS and can be a comma delimited list to denote members of ' - 'a team' + 'a team or a single interface for VLAN/PKEY connections.' + }, + 'net.vlan_id': { + 'description': 'Ethernet VLAN or InfiniBand PKEY to use for this connection. ' + 'Specify the parent device using net.interface_names.' }, 'net.ipv4_address': { 'description': 'When configuring static, use this address. 
If ' diff --git a/confluent_server/confluent/netutil.py b/confluent_server/confluent/netutil.py index 9bac92c2..c1a9210a 100644 --- a/confluent_server/confluent/netutil.py +++ b/confluent_server/confluent/netutil.py @@ -193,6 +193,9 @@ class NetManager(object): iname = attribs.get('interface_names', None) if iname: myattribs['interface_names'] = iname + vlanid = attribs.get('vlan_id', None) + if vlanid: + myattribs['vlan_id'] = vlanid teammod = attribs.get('team_mode', None) if teammod: myattribs['team_mode'] = teammod From 6943c2dc0f1f0cfa7b530d6d09ccbdd199764efb Mon Sep 17 00:00:00 2001 From: Markus Hilger Date: Fri, 9 Aug 2024 19:38:45 +0200 Subject: [PATCH 2/8] Make sure VLAN/PKEY connections are created last Needed for VLANs on bond connections etc. --- confluent_osdeploy/common/profile/scripts/confignet | 2 ++ 1 file changed, 2 insertions(+) diff --git a/confluent_osdeploy/common/profile/scripts/confignet b/confluent_osdeploy/common/profile/scripts/confignet index 7b0eddf9..20fcc8b8 100644 --- a/confluent_osdeploy/common/profile/scripts/confignet +++ b/confluent_osdeploy/common/profile/scripts/confignet @@ -516,6 +516,8 @@ if __name__ == '__main__': netname_to_interfaces['default']['interfaces'] -= netname_to_interfaces[netn]['interfaces'] if not netname_to_interfaces['default']['interfaces']: del netname_to_interfaces['default'] + # Make sure VLAN/PKEY connections are created last + netname_to_interfaces = dict(sorted(netname_to_interfaces.items(), key=lambda item: 'vlan_id' in item[1]['settings'])) rm_tmp_llas(tmpllas) if os.path.exists('/usr/sbin/netplan'): nm = NetplanManager(dc) From 005adec437dc631d5b3f9f7b38cc640336bdc636 Mon Sep 17 00:00:00 2001 From: Markus Hilger Date: Fri, 9 Aug 2024 19:45:19 +0200 Subject: [PATCH 3/8] Add error handling for interface_names --- confluent_osdeploy/common/profile/scripts/confignet | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/confluent_osdeploy/common/profile/scripts/confignet 
b/confluent_osdeploy/common/profile/scripts/confignet index 20fcc8b8..562a8ca1 100644 --- a/confluent_osdeploy/common/profile/scripts/confignet +++ b/confluent_osdeploy/common/profile/scripts/confignet @@ -405,7 +405,10 @@ class NetworkManager(object): else: cname = stgs.get('connection_name', None) iname = list(cfg['interfaces'])[0] - ctype = self.devtypes[iname] + ctype = self.devtypes.get(iname, None) + if not ctype: + sys.stderr.write("Warning, no device found for interface_name ({0}), skipping setup\n".format(iname)) + return if stgs.get('vlan_id', None): vlan = stgs['vlan_id'] if ctype == 'infiniband': From 09611744258586eba22cb5fa19e16d5b6ca2759b Mon Sep 17 00:00:00 2001 From: Markus Hilger Date: Fri, 9 Aug 2024 19:55:42 +0200 Subject: [PATCH 4/8] Remove redundant code --- confluent_osdeploy/common/profile/scripts/confignet | 2 -- 1 file changed, 2 deletions(-) diff --git a/confluent_osdeploy/common/profile/scripts/confignet b/confluent_osdeploy/common/profile/scripts/confignet index 562a8ca1..650f4eb6 100644 --- a/confluent_osdeploy/common/profile/scripts/confignet +++ b/confluent_osdeploy/common/profile/scripts/confignet @@ -416,13 +416,11 @@ class NetworkManager(object): cmdargs['infiniband.parent'] = iname cmdargs['infiniband.p-key'] = vlan iname = '{0}.{1}'.format(iname, vlan[2:]) - cname = iname if not cname else cname elif ctype == 'ethernet': ctype = 'vlan' cmdargs['vlan.parent'] = iname cmdargs['vlan.id'] = vlan iname = '{0}.{1}'.format(iname, vlan) - cname = iname if not cname else cname else: sys.stderr.write("Warning, unknown interface_name ({0}) device type ({1}) for VLAN/PKEY, skipping setup\n".format(iname, ctype)) return From a6a1907611411f19092a7581fed1d5d15415cff9 Mon Sep 17 00:00:00 2001 From: Adrian Reber Date: Tue, 13 Aug 2024 17:30:43 +0200 Subject: [PATCH 5/8] Do not overwrite the node SSH key with the last found public key Instead of overwriting the SSH public key for the node concatenate all found SSH keys together in one file.
Signed-off-by: Adrian Reber --- confluent_server/confluent/sshutil.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/confluent_server/confluent/sshutil.py b/confluent_server/confluent/sshutil.py index cf17f37a..1f8960d8 100644 --- a/confluent_server/confluent/sshutil.py +++ b/confluent_server/confluent/sshutil.py @@ -213,15 +213,17 @@ def initialize_root_key(generate, automation=False): suffix = 'automationpubkey' else: suffix = 'rootpubkey' + keyname = '/var/lib/confluent/public/site/ssh/{0}.{1}'.format( + myname, suffix) for auth in authorized: - shutil.copy( - auth, - '/var/lib/confluent/public/site/ssh/{0}.{1}'.format( - myname, suffix)) - os.chmod('/var/lib/confluent/public/site/ssh/{0}.{1}'.format( - myname, suffix), 0o644) - os.chown('/var/lib/confluent/public/site/ssh/{0}.{1}'.format( - myname, suffix), neededuid, -1) + local_key = open(auth, 'r') + dest = open(keyname, 'a') + dest.write(local_key.read()) + local_key.close() + dest.close() + if os.path.exists(keyname): + os.chmod(keyname, 0o644) + os.chown(keyname, neededuid, -1) if alreadyexist: raise AlreadyExists() From 29d0e904876a249a6a3afdddc789b931b589e66e Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 14 Aug 2024 11:26:51 -0400 Subject: [PATCH 6/8] Implement confluentdbutil 'merge' For now, implement 'skip', where conflicting nodes/groups are ignored in new input. 
--- confluent_server/bin/confluentdbutil | 15 ++- .../confluent/config/configmanager.py | 106 ++++++++++++------ 2 files changed, 79 insertions(+), 42 deletions(-) diff --git a/confluent_server/bin/confluentdbutil b/confluent_server/bin/confluentdbutil index 25a5acf8..b7c1e5c7 100755 --- a/confluent_server/bin/confluentdbutil +++ b/confluent_server/bin/confluentdbutil @@ -30,7 +30,7 @@ import confluent.config.conf as conf import confluent.main as main argparser = optparse.OptionParser( - usage="Usage: %prog [options] [dump|restore] [path]") + usage="Usage: %prog [options] [dump|restore|merge] [path]") argparser.add_option('-p', '--password', help='Password to use to protect/unlock a protected dump') argparser.add_option('-i', '--interactivepassword', help='Prompt for password', @@ -51,13 +51,13 @@ argparser.add_option('-s', '--skipkeys', action='store_true', 'data is needed. keys do not change and as such ' 'they do not require incremental backup') (options, args) = argparser.parse_args() -if len(args) != 2 or args[0] not in ('dump', 'restore'): +if len(args) != 2 or args[0] not in ('dump', 'restore', 'merge'): argparser.print_help() sys.exit(1) dumpdir = args[1] -if args[0] == 'restore': +if args[0] in ('restore', 'merge'): pid = main.is_running() if pid is not None: print("Confluent is running, must shut down to restore db") @@ -69,9 +69,12 @@ if args[0] == 'restore': if options.interactivepassword: password = getpass.getpass('Enter password to restore backup: ') try: - cfm.init(True) - cfm.statelessmode = True - cfm.restore_db_from_directory(dumpdir, password) + stateless = args[0] == 'restore' + cfm.init(stateless) + cfm.statelessmode = stateless + cfm.restore_db_from_directory( + dumpdir, password, + merge="skip" if args[0] == 'merge' else False) cfm.statelessmode = False cfm.ConfigManager.wait_for_sync(True) if owner != 0: diff --git a/confluent_server/confluent/config/configmanager.py b/confluent_server/confluent/config/configmanager.py index 
6cbf4604..788c2d60 100644 --- a/confluent_server/confluent/config/configmanager.py +++ b/confluent_server/confluent/config/configmanager.py @@ -1903,7 +1903,7 @@ class ConfigManager(object): def add_group_attributes(self, attribmap): self.set_group_attributes(attribmap, autocreate=True) - def set_group_attributes(self, attribmap, autocreate=False): + def set_group_attributes(self, attribmap, autocreate=False, merge="replace", keydata=None, skipped=None): for group in attribmap: curr = attribmap[group] for attrib in curr: @@ -1924,11 +1924,11 @@ class ConfigManager(object): if cfgstreams: exec_on_followers('_rpc_set_group_attributes', self.tenant, attribmap, autocreate) - self._true_set_group_attributes(attribmap, autocreate) + self._true_set_group_attributes(attribmap, autocreate, merge=merge, keydata=keydata, skipped=skipped) - def _true_set_group_attributes(self, attribmap, autocreate=False): + def _true_set_group_attributes(self, attribmap, autocreate=False, merge="replace", keydata=None, skipped=None): changeset = {} - for group in attribmap: + for group in list(attribmap): if group == '': raise ValueError('"{0}" is not a valid group name'.format( group)) @@ -1941,6 +1941,11 @@ class ConfigManager(object): group)) if not autocreate and group not in self._cfgstore['nodegroups']: raise ValueError("{0} group does not exist".format(group)) + if merge == 'skip' and group in self._cfgstore['nodegroups']: + if skipped is not None: + skipped.append(group) + del attribmap[group] + continue for attr in list(attribmap[group]): # first do a pass to normalize out any aliased attribute names if attr in _attraliases: @@ -2015,6 +2020,9 @@ class ConfigManager(object): newdict = {'value': attribmap[group][attr]} else: newdict = attribmap[group][attr] + if keydata and attr.startswith('secret.') and 'cryptvalue' in newdict: + newdict['value'] = decrypt_value(newdict['cryptvalue'], keydata['cryptkey'], keydata['integritykey']) + del newdict['cryptvalue'] if 'value' in newdict and 
attr.startswith("secret."): newdict['cryptvalue'] = crypt_value(newdict['value']) del newdict['value'] @@ -2349,7 +2357,7 @@ class ConfigManager(object): - def set_node_attributes(self, attribmap, autocreate=False): + def set_node_attributes(self, attribmap, autocreate=False, merge="replace", keydata=None, skipped=None): for node in attribmap: curr = attribmap[node] for attrib in curr: @@ -2370,9 +2378,9 @@ class ConfigManager(object): if cfgstreams: exec_on_followers('_rpc_set_node_attributes', self.tenant, attribmap, autocreate) - self._true_set_node_attributes(attribmap, autocreate) + self._true_set_node_attributes(attribmap, autocreate, merge, keydata, skipped) - def _true_set_node_attributes(self, attribmap, autocreate): + def _true_set_node_attributes(self, attribmap, autocreate, merge="replace", keydata=None, skipped=None): # TODO(jbjohnso): multi mgr support, here if we have peers, # pickle the arguments and fire them off in eventlet # flows to peers, all should have the same result @@ -2380,7 +2388,7 @@ class ConfigManager(object): changeset = {} # first do a sanity check of the input upfront # this mitigates risk of arguments being partially applied - for node in attribmap: + for node in list(attribmap): node = confluent.util.stringify(node) if node == '': raise ValueError('"{0}" is not a valid node name'.format(node)) @@ -2393,6 +2401,11 @@ class ConfigManager(object): '"{0}" is not a valid node name'.format(node)) if autocreate is False and node not in self._cfgstore['nodes']: raise ValueError("node {0} does not exist".format(node)) + if merge == "skip" and node in self._cfgstore['nodes']: + del attribmap[node] + if skipped is not None: + skipped.append(node) + continue if 'groups' not in attribmap[node] and node not in self._cfgstore['nodes']: attribmap[node]['groups'] = [] for attrname in list(attribmap[node]): @@ -2463,6 +2476,9 @@ class ConfigManager(object): # add check here, skip None attributes if newdict is None: continue + if keydata and 
attrname.startswith('secret.') and 'cryptvalue' in newdict: + newdict['value'] = decrypt_value(newdict['cryptvalue'], keydata['cryptkey'], keydata['integritykey']) + del newdict['cryptvalue'] if 'value' in newdict and attrname.startswith("secret."): newdict['cryptvalue'] = crypt_value(newdict['value']) del newdict['value'] @@ -2503,14 +2519,14 @@ class ConfigManager(object): self._bg_sync_to_file() #TODO: wait for synchronization to suceed/fail??) - def _load_from_json(self, jsondata, sync=True): + def _load_from_json(self, jsondata, sync=True, merge=False, keydata=None): self.inrestore = True try: - self._load_from_json_backend(jsondata, sync=True) + self._load_from_json_backend(jsondata, sync=True, merge=merge, keydata=keydata) finally: self.inrestore = False - def _load_from_json_backend(self, jsondata, sync=True): + def _load_from_json_backend(self, jsondata, sync=True, merge=False, keydata=None): """Load fresh configuration data from jsondata :param jsondata: String of jsondata @@ -2563,20 +2579,27 @@ class ConfigManager(object): pass # Now we have to iterate through each fixed up element, using the # set attribute to flesh out inheritence and expressions - _cfgstore['main']['idmap'] = {} + if (not merge) or _cfgstore.get('main', {}).get('idmap', None) is None: + _cfgstore['main']['idmap'] = {} + attribmerge = merge if merge else "replace" for confarea in _config_areas: - self._cfgstore[confarea] = {} + if not merge or confarea not in self._cfgstore: + self._cfgstore[confarea] = {} if confarea not in tmpconfig: continue if confarea == 'nodes': - self.set_node_attributes(tmpconfig[confarea], True) + self.set_node_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata) elif confarea == 'nodegroups': - self.set_group_attributes(tmpconfig[confarea], True) + self.set_group_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata) elif confarea == 'usergroups': + if merge: + continue for usergroup in tmpconfig[confarea]: role = 
tmpconfig[confarea][usergroup].get('role', 'Administrator') self.create_usergroup(usergroup, role=role) elif confarea == 'users': + if merge: + continue for user in tmpconfig[confarea]: ucfg = tmpconfig[confarea][user] uid = ucfg.get('id', None) @@ -2876,7 +2899,7 @@ def _restore_keys(jsond, password, newpassword=None, sync=True): newpassword = keyfile.read() set_global('master_privacy_key', _format_key(cryptkey, password=newpassword), sync) - if integritykey: + if integritykey: set_global('master_integrity_key', _format_key(integritykey, password=newpassword), sync) _masterkey = cryptkey @@ -2911,35 +2934,46 @@ def _dump_keys(password, dojson=True): return keydata -def restore_db_from_directory(location, password): +def restore_db_from_directory(location, password, merge=False): + kdd = None try: with open(os.path.join(location, 'keys.json'), 'r') as cfgfile: keydata = cfgfile.read() - json.loads(keydata) - _restore_keys(keydata, password) + kdd = json.loads(keydata) + if merge: + if 'cryptkey' in kdd: + kdd['cryptkey'] = _parse_key(kdd['cryptkey'], password) + if 'integritykey' in kdd: + kdd['integritykey'] = _parse_key(kdd['integritykey'], password) + else: + kdd['integritykey'] = None # GCM + else: + kdd = None + _restore_keys(keydata, password) except IOError as e: if e.errno == 2: raise Exception("Cannot restore without keys, this may be a " "redacted dump") - try: - moreglobals = json.load(open(os.path.join(location, 'globals.json'))) - for globvar in moreglobals: - set_global(globvar, moreglobals[globvar]) - except IOError as e: - if e.errno != 2: - raise - try: - collective = json.load(open(os.path.join(location, 'collective.json'))) - _cfgstore['collective'] = {} - for coll in collective: - add_collective_member(coll, collective[coll]['address'], - collective[coll]['fingerprint']) - except IOError as e: - if e.errno != 2: - raise + if not merge: + try: + moreglobals = json.load(open(os.path.join(location, 'globals.json'))) + for globvar in moreglobals: + 
set_global(globvar, moreglobals[globvar]) + except IOError as e: + if e.errno != 2: + raise + try: + collective = json.load(open(os.path.join(location, 'collective.json'))) + _cfgstore['collective'] = {} + for coll in collective: + add_collective_member(coll, collective[coll]['address'], + collective[coll]['fingerprint']) + except IOError as e: + if e.errno != 2: + raise with open(os.path.join(location, 'main.json'), 'r') as cfgfile: cfgdata = cfgfile.read() - ConfigManager(tenant=None)._load_from_json(cfgdata) + ConfigManager(tenant=None)._load_from_json(cfgdata, merge=merge, keydata=kdd) ConfigManager.wait_for_sync(True) From 28b88bdb12d78f16a29549a3ab4f2d914252c434 Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 14 Aug 2024 11:40:11 -0400 Subject: [PATCH 7/8] Add reporting of skipped nodes in a 'skip' merge --- confluent_server/bin/confluentdbutil | 16 +++++++++++++--- .../confluent/config/configmanager.py | 16 +++++++++------- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/confluent_server/bin/confluentdbutil b/confluent_server/bin/confluentdbutil index b7c1e5c7..e74c2ab4 100755 --- a/confluent_server/bin/confluentdbutil +++ b/confluent_server/bin/confluentdbutil @@ -1,7 +1,7 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2017 Lenovo +# Copyright 2017,2024 Lenovo # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -72,9 +72,19 @@ if args[0] in ('restore', 'merge'): stateless = args[0] == 'restore' cfm.init(stateless) cfm.statelessmode = stateless + skipped = {'nodes': [], 'nodegroups': []} cfm.restore_db_from_directory( dumpdir, password, - merge="skip" if args[0] == 'merge' else False) + merge="skip" if args[0] == 'merge' else False, skipped=skipped) + if skipped['nodes']: + skippedn = ','.join(skipped['nodes']) + print('The following nodes were skipped during merge: ' + '{}'.format(skippedn)) + if skipped['nodegroups']: + skippedn = ','.join(skipped['nodegroups']) + print('The following node groups were skipped during merge: ' + '{}'.format(skippedn)) + cfm.statelessmode = False cfm.ConfigManager.wait_for_sync(True) if owner != 0: diff --git a/confluent_server/confluent/config/configmanager.py b/confluent_server/confluent/config/configmanager.py index 788c2d60..7702b97d 100644 --- a/confluent_server/confluent/config/configmanager.py +++ b/confluent_server/confluent/config/configmanager.py @@ -2519,19 +2519,21 @@ class ConfigManager(object): self._bg_sync_to_file() #TODO: wait for synchronization to suceed/fail??) 
- def _load_from_json(self, jsondata, sync=True, merge=False, keydata=None): + def _load_from_json(self, jsondata, sync=True, merge=False, keydata=None, skipped=None): self.inrestore = True try: - self._load_from_json_backend(jsondata, sync=True, merge=merge, keydata=keydata) + self._load_from_json_backend(jsondata, sync=True, merge=merge, keydata=keydata, skipped=skipped) finally: self.inrestore = False - def _load_from_json_backend(self, jsondata, sync=True, merge=False, keydata=None): + def _load_from_json_backend(self, jsondata, sync=True, merge=False, keydata=None, skipped=None): """Load fresh configuration data from jsondata :param jsondata: String of jsondata :return: """ + if not skipped: + skipped = {'nodes': None, 'nodegroups': None} dumpdata = json.loads(jsondata) tmpconfig = {} for confarea in _config_areas: @@ -2588,9 +2590,9 @@ class ConfigManager(object): if confarea not in tmpconfig: continue if confarea == 'nodes': - self.set_node_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata) + self.set_node_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata, skipped=skipped['nodes']) elif confarea == 'nodegroups': - self.set_group_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata) + self.set_group_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata, skipped=skipped['nodegroups']) elif confarea == 'usergroups': if merge: continue @@ -2934,7 +2936,7 @@ def _dump_keys(password, dojson=True): return keydata -def restore_db_from_directory(location, password, merge=False): +def restore_db_from_directory(location, password, merge=False, skipped=None): kdd = None try: with open(os.path.join(location, 'keys.json'), 'r') as cfgfile: @@ -2973,7 +2975,7 @@ def restore_db_from_directory(location, password, merge=False): raise with open(os.path.join(location, 'main.json'), 'r') as cfgfile: cfgdata = cfgfile.read() - ConfigManager(tenant=None)._load_from_json(cfgdata, merge=merge, 
keydata=kdd) + ConfigManager(tenant=None)._load_from_json(cfgdata, merge=merge, keydata=kdd, skipped=skipped) ConfigManager.wait_for_sync(True) From 82e0d9c434482688b9278178cf741db69ced07bd Mon Sep 17 00:00:00 2001 From: Jarrod Johnson Date: Wed, 14 Aug 2024 16:08:02 -0400 Subject: [PATCH 8/8] Rework ssh key init to reset key and use context management --- confluent_server/confluent/sshutil.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/confluent_server/confluent/sshutil.py b/confluent_server/confluent/sshutil.py index 1f8960d8..40512648 100644 --- a/confluent_server/confluent/sshutil.py +++ b/confluent_server/confluent/sshutil.py @@ -215,12 +215,13 @@ def initialize_root_key(generate, automation=False): suffix = 'rootpubkey' keyname = '/var/lib/confluent/public/site/ssh/{0}.{1}'.format( myname, suffix) + if authorized: + with open(keyname, 'w'): + pass for auth in authorized: - local_key = open(auth, 'r') - dest = open(keyname, 'a') - dest.write(local_key.read()) - local_key.close() - dest.close() + with open(auth, 'r') as local_key: + with open(keyname, 'a') as dest: + dest.write(local_key.read()) if os.path.exists(keyname): os.chmod(keyname, 0o644) os.chown(keyname, neededuid, -1)