diff --git a/confluent_client/bin/confetty b/confluent_client/bin/confetty index 3844f6b8..043881a4 100755 --- a/confluent_client/bin/confetty +++ b/confluent_client/bin/confetty @@ -76,6 +76,11 @@ import confluent.termhandler as termhandler import confluent.tlvdata as tlvdata import confluent.client as client +try: + unicode +except NameError: + unicode = str + conserversequence = '\x05c' # ctrl-e, c clearpowermessage = False @@ -299,8 +304,11 @@ currchildren = None def print_result(res): + global exitcode if 'errorcode' in res or 'error' in res: print(res['error']) + if 'errorcode' in res: + exitcode |= res['errorcode'] return if 'databynode' in res: print_result(res['databynode']) diff --git a/confluent_client/bin/nodeattrib b/confluent_client/bin/nodeattrib index 3489bb8b..63443a08 100755 --- a/confluent_client/bin/nodeattrib +++ b/confluent_client/bin/nodeattrib @@ -50,6 +50,9 @@ argparser.add_option('-c', '--clear', action='store_true', help='Clear attributes') argparser.add_option('-p', '--prompt', action='store_true', help='Prompt for attribute values interactively') +argparser.add_option('-m', '--maxnodes', type='int', + help='Prompt if trying to set attributes on more ' + 'than specified number of nodes') (options, args) = argparser.parse_args() @@ -87,6 +90,7 @@ if len(args) > 1: if oneval != twoval: print('Values did not match.') argassign[arg] = twoval + session.stop_if_noderange_over(noderange, options.maxnodes) exitcode=client.updateattrib(session,args,nodetype, noderange, options, argassign) try: # setting user output to what the user inputs diff --git a/confluent_client/bin/nodebmcreset b/confluent_client/bin/nodebmcreset index 7a2bd018..09a6221b 100755 --- a/confluent_client/bin/nodebmcreset +++ b/confluent_client/bin/nodebmcreset @@ -32,6 +32,8 @@ if path.startswith('/opt'): import confluent.client as client argparser = optparse.OptionParser(usage="Usage: %prog ") +argparser.add_option('-m', '--maxnodes', type='int', + help='Number of nodes to 
affect before prompting for confirmation') (options, args) = argparser.parse_args() try: noderange = args[0] @@ -43,8 +45,10 @@ session = client.Command() exitcode = 0 errorNodes = set([]) - +session.stop_if_noderange_over(noderange, options.maxnodes) success = session.simple_noderange_command(noderange, 'configuration/management_controller/reset', 'reset', key='state', errnodes=errorNodes) # = 0 if successful +if success != 0: + sys.exit(success) # Determine which nodes were successful and print them diff --git a/confluent_client/bin/nodeboot b/confluent_client/bin/nodeboot index 73c44b7b..a2c2f322 100755 --- a/confluent_client/bin/nodeboot +++ b/confluent_client/bin/nodeboot @@ -42,6 +42,10 @@ argparser.add_option('-p', '--persist', dest='persist', action='store_true', default=False, help='Request the boot device be persistent rather than ' 'one time') +argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to boot, ' + 'prompting if over the threshold') (options, args) = argparser.parse_args() @@ -54,8 +58,8 @@ except IndexError: sys.exit(1) client.check_globbing(noderange) bootdev = None -if len(sys.argv) > 2: - bootdev = sys.argv[2] +if len(args) > 1: + bootdev = args[1] if bootdev in ('net', 'pxe'): bootdev = 'network' session = client.Command() @@ -66,6 +70,7 @@ else: bootmode = 'uefi' errnodes = set([]) +session.stop_if_noderange_over(noderange, options.maxnodes) rc = session.simple_noderange_command(noderange, '/boot/nextdevice', bootdev, bootmode=bootmode, persistent=options.persist, diff --git a/confluent_client/bin/nodeconfig b/confluent_client/bin/nodeconfig index 89e65702..6493c6e1 100755 --- a/confluent_client/bin/nodeconfig +++ b/confluent_client/bin/nodeconfig @@ -54,6 +54,12 @@ argparser.add_option('-d', '--detail', dest='detail', action='store_true', default=False, help='Provide verbose information as available, such as ' 'help text and possible valid values') +argparser.add_option('-e', '--extra', 
dest='extra', + action='store_true', default=False, + help='Access extra configuration. Extra configuration is generally ' + 'reserved for unpopular or redundant options that may be slow to ' + 'read. Notably the IMM category on Lenovo settings is considered ' + 'to be extra configuration') argparser.add_option('-x', '--exclude', dest='exclude', action='store_true', default=False, help='Treat positional arguments as items to not ' @@ -68,6 +74,10 @@ argparser.add_option('-r', '--restoredefault', default=False, help='Restore the configuration of the node ' 'to factory default for given component. ' 'Currently only uefi is supported') +argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to configure, ' + 'prompting if over the threshold') (options, args) = argparser.parse_args() cfgpaths = { @@ -99,6 +109,7 @@ assignment = {} queryparms = {} printsys = [] printbmc = [] +printextbmc = [] printallbmc = False setsys = {} forceset = False @@ -173,11 +184,17 @@ def parse_config_line(arguments): del queryparms[path] except KeyError: pass - if not matchedparms: + if param.lower() == 'imm': + printextbmc.append(param) + options.extra = True + elif not matchedparms: printsys.append(param) elif param not in cfgpaths: if param.startswith('bmc.'): printbmc.append(param.replace('bmc.', '')) + elif param.lower().startswith('imm'): + options.extra = True + printextbmc.append(param) else: printsys.append(param) else: @@ -205,6 +222,7 @@ else: session = client.Command() rcode = 0 if options.restoredefault: + session.stop_if_noderange_over(noderange, options.maxnodes) if options.restoredefault.lower() in ( 'sys', 'system', 'uefi', 'bios'): for fr in session.update( @@ -225,6 +243,7 @@ if options.restoredefault: options.restoredefault)) sys.exit(1) if setmode: + session.stop_if_noderange_over(noderange, options.maxnodes) if options.exclude: sys.stderr.write('Cannot use exclude and assign at the same time\n') sys.exit(1) @@ -269,10 +288,15 
@@ else: NullOpt(), queryparms[path]) if rc: sys.exit(rc) - if printbmc or printallbmc: - rcode = client.print_attrib_path( - '/noderange/{0}/configuration/management_controller/extended/all'.format(noderange), - session, printbmc, options, attrprefix='bmc.') + if printsys == 'all' or printextbmc or printbmc or printallbmc: + if printbmc or not printextbmc: + rcode = client.print_attrib_path( + '/noderange/{0}/configuration/management_controller/extended/all'.format(noderange), + session, printbmc, options, attrprefix='bmc.') + if options.extra: + rcode |= client.print_attrib_path( + '/noderange/{0}/configuration/management_controller/extended/extra'.format(noderange), + session, printextbmc, options) if printsys or options.exclude: if printsys == 'all': printsys = [] @@ -281,7 +305,6 @@ else: else: path = '/noderange/{0}/configuration/system/advanced'.format( noderange) - rcode = client.print_attrib_path(path, session, printsys, options) sys.exit(rcode) diff --git a/confluent_client/bin/nodeeventlog b/confluent_client/bin/nodeeventlog index 46d08604..ff9ad4e2 100755 --- a/confluent_client/bin/nodeeventlog +++ b/confluent_client/bin/nodeeventlog @@ -38,6 +38,10 @@ if sys.version_info[0] < 3: argparser = optparse.OptionParser( usage="Usage: %prog [options] noderange [clear]") +argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to clear if clearing log, ' + 'prompting if over the threshold') (options, args) = argparser.parse_args() try: noderange = args[0] @@ -46,11 +50,11 @@ except IndexError: sys.exit(1) client.check_globbing(noderange) deletemode = False -if len(sys.argv) > 3: +if len(args) > 2: argparser.print_help() sys.exit(1) -if len(sys.argv) == 3: - if sys.argv[2] == 'clear': +if len(args) == 2: + if args[1] == 'clear': deletemode = True else: argparser.print_help() @@ -88,6 +92,7 @@ def format_event(evt): if deletemode: func = session.delete + session.stop_if_noderange_over(noderange, options.maxnodes) else: 
func = session.read for rsp in func('/noderange/{0}/events/hardware/log'.format(noderange)): diff --git a/confluent_client/bin/nodefirmware b/confluent_client/bin/nodefirmware index f251f9cf..b7c05b15 100755 --- a/confluent_client/bin/nodefirmware +++ b/confluent_client/bin/nodefirmware @@ -59,6 +59,10 @@ argparser = optparse.OptionParser( "%prog [list][update [--backup ]]|[]") argparser.add_option('-b', '--backup', action='store_true', help='Target a backup bank rather than primary') +argparser.add_option('-m', '--maxnodes', type='int', + help='When updating, prompt if more than the specified ' + 'number of servers will be affected') + (options, args) = argparser.parse_args() upfile = None try: @@ -95,6 +99,7 @@ def get_update_progress(session, url): def update_firmware(session, filename): global exitcode + session.stop_if_noderange_over(noderange, options.maxnodes) output = sq.ScreenPrinter(noderange, session) nodeurls = {} filename = os.path.abspath(filename) diff --git a/confluent_client/bin/nodelicense b/confluent_client/bin/nodelicense index b602e733..d574b686 100755 --- a/confluent_client/bin/nodelicense +++ b/confluent_client/bin/nodelicense @@ -37,6 +37,10 @@ exitcode = 0 argparser = optparse.OptionParser( usage="Usage: " "%prog [list][install |save |delete ]") +argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to delete licenses from, ' + 'prompting if over the threshold') (options, args) = argparser.parse_args() upfile = None downdir = None @@ -52,7 +56,7 @@ try: delete = args[2] elif args[1] != 'list': argparser.print_help() - sys.exit(1) + sys.exit(1) except IndexError: argparser.print_help() sys.exit(1) @@ -138,6 +142,7 @@ try: elif downdir: save_licenses(session, downdir) elif delete: + session.stop_if_noderange_over(noderange, options.maxnodes) delete_license(session, delete) else: show_licenses(session) diff --git a/confluent_client/bin/nodepower b/confluent_client/bin/nodepower index 
13b5e5d5..1caccdac 100755 --- a/confluent_client/bin/nodepower +++ b/confluent_client/bin/nodepower @@ -37,6 +37,11 @@ argparser = optparse.OptionParser( argparser.add_option('-p', '--showprevious', dest='previous', action='store_true', default=False, help='Show previous power state') +argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to change power state, ' + 'prompting if over the threshold') + (options, args) = argparser.parse_args() try: noderange = args[0] @@ -72,4 +77,4 @@ if options.previous: # add dictionary to session session.add_precede_dict(prev) -sys.exit(session.simple_noderange_command(noderange, '/power/state', setstate)) +sys.exit(session.simple_noderange_command(noderange, '/power/state', setstate, promptover=options.maxnodes)) \ No newline at end of file diff --git a/confluent_client/bin/noderemove b/confluent_client/bin/noderemove index 7cde4247..f94afee0 100755 --- a/confluent_client/bin/noderemove +++ b/confluent_client/bin/noderemove @@ -35,6 +35,10 @@ import confluent.client as client argparser = optparse.OptionParser( usage='''\n %prog noderange \n ''') +argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to delete, ' + 'prompting if over the threshold') (options, args) = argparser.parse_args() if len(args) != 1: argparser.print_help() @@ -43,6 +47,7 @@ noderange = args[0] client.check_globbing(noderange) session = client.Command() exitcode = 0 +session.stop_if_noderange_over(noderange, options.maxnodes) for r in session.delete('/noderange/{0}'.format(noderange)): if 'error' in r: sys.stderr.write(r['error'] + '\n') diff --git a/confluent_client/bin/nodereseat b/confluent_client/bin/nodereseat index 68dcc25f..72e3b848 100755 --- a/confluent_client/bin/nodereseat +++ b/confluent_client/bin/nodereseat @@ -32,6 +32,10 @@ if path.startswith('/opt'): import confluent.client as client argparser = optparse.OptionParser(usage="Usage: %prog ") 
+argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to reseat, ' + 'prompting if over the threshold') (options, args) = argparser.parse_args() try: noderange = args[0] @@ -43,7 +47,7 @@ session = client.Command() exitcode = 0 errorNodes = set([]) - +session.stop_if_noderange_over(noderange, options.maxnodes) success = session.simple_noderange_command(noderange, 'power/reseat', 'reseat', key='reseat', errnodes=errorNodes) # = 0 if successful # Determine which nodes were successful and print them diff --git a/confluent_client/bin/nodersync b/confluent_client/bin/nodersync index 3f31c64f..0c7ff568 100755 --- a/confluent_client/bin/nodersync +++ b/confluent_client/bin/nodersync @@ -42,6 +42,10 @@ def run(): argparser = optparse.OptionParser( usage="Usage: %prog location noderange:location", ) + argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to run rsync to, ' + 'prompting if over the threshold') argparser.add_option('-f', '-c', '--count', type='int', default=168, help='Number of nodes to concurrently rsync') # among other things, FD_SETSIZE limits. 
Besides, spawning too many @@ -64,7 +68,7 @@ def run(): pipedesc = {} pendingexecs = deque() exitcode = 0 - + c.stop_if_noderange_over(noderange, options.maxnodes) for exp in c.create('/noderange/{0}/attributes/expression'.format(noderange), {'expression': cmdstr}): if 'error' in exp: @@ -100,7 +104,7 @@ def run(): if desc['type'] == 'stdout': if node not in pernodeout: pernodeout[node] = '' - pernodeout[node] += stringify(data) + pernodeout[node] += client.stringify(data) if '\n' in pernodeout[node]: currout, pernodeout[node] = pernodeout[node].split('\n', 1) if currout: diff --git a/confluent_client/bin/noderun b/confluent_client/bin/noderun index 0ecd7626..73bbc80e 100755 --- a/confluent_client/bin/noderun +++ b/confluent_client/bin/noderun @@ -46,6 +46,10 @@ def run(): help='Number of commands to run at a time') argparser.add_option('-n', '--nonodeprefix', action='store_true', help='Do not prefix output with node names') + argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to run the command with, ' + 'prompting if over the threshold') # among other things, FD_SETSIZE limits. Besides, spawning too many # processes can be unkind for the unaware on memory pressure and such... 
argparser.disable_interspersed_args() @@ -63,7 +67,7 @@ def run(): pipedesc = {} pendingexecs = deque() exitcode = 0 - + c.stop_if_noderange_over(args[0], options.maxnodes) for exp in c.create('/noderange/{0}/attributes/expression'.format(args[0]), {'expression': cmdstr}): if 'error' in exp: diff --git a/confluent_client/bin/nodesetboot b/confluent_client/bin/nodesetboot index 9963208e..cb596148 100755 --- a/confluent_client/bin/nodesetboot +++ b/confluent_client/bin/nodesetboot @@ -43,7 +43,10 @@ argparser.add_option('-p', '--persist', dest='persist', action='store_true', argparser.add_option('-u', '--uefi', dest='uefi', action='store_true', default=True, help='Request UEFI style boot (rather than BIOS)') - +argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to modify next boot device, ' + 'prompting if over the threshold') (options, args) = argparser.parse_args() try: @@ -63,6 +66,7 @@ if options.biosmode: bootmode = 'bios' else: bootmode = 'uefi' +session.stop_if_noderange_over(noderange, options.maxnodes) sys.exit(session.simple_noderange_command(noderange, '/boot/nextdevice', bootdev, bootmode=bootmode, persistent=options.persist)) diff --git a/confluent_client/bin/nodeshell b/confluent_client/bin/nodeshell index 6d1fe138..8899a013 100755 --- a/confluent_client/bin/nodeshell +++ b/confluent_client/bin/nodeshell @@ -46,6 +46,10 @@ def run(): help='Number of commands to run at a time') argparser.add_option('-n', '--nonodeprefix', action='store_true', help='Do not prefix output with node names') + argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to run remote ssh command to, ' + 'prompting if over the threshold') # among other things, FD_SETSIZE limits. Besides, spawning too many # processes can be unkind for the unaware on memory pressure and such... 
argparser.disable_interspersed_args() @@ -55,7 +59,7 @@ def run(): sys.exit(1) client.check_globbing(args[0]) concurrentprocs = options.count - c = client.Command() + c = client.Command() cmdstr = " ".join(args[1:]) currprocs = 0 @@ -64,7 +68,7 @@ def run(): pendingexecs = deque() exitcode = 0 - + c.stop_if_noderange_over(args[0], options.maxnodes) for exp in c.create('/noderange/{0}/attributes/expression'.format(args[0]), {'expression': cmdstr}): if 'error' in exp: diff --git a/confluent_client/bin/nodestorage b/confluent_client/bin/nodestorage index e1170d15..862417e1 100644 --- a/confluent_client/bin/nodestorage +++ b/confluent_client/bin/nodestorage @@ -63,10 +63,16 @@ def _print_cfg(scfg): sys.stderr.write(e['error'] + '\n') exitcode = e.get('errorcode', 1) for node in e.get('databynode', {}): + curr = e['databynode'][node] + if 'error' in curr: + if 'no available drives' in curr['error']: + curr['error'] += ' (drives must be in unconfigured state to be available, they must not be in jbod or online state)' + sys.stderr.write('{0}: {1}\n'.format(node, curr['error'])) + exitcode = curr.get('errorcode', 1) + continue if node not in storagebynode: storagebynode[node] = {'disks': [], 'arrays': [], 'volumes': []} - curr = e['databynode'][node] storagebynode[node][curr['type'] + 's'].append(curr) for node in storagebynode: for disk in sorted(storagebynode[node]['disks'], @@ -108,6 +114,7 @@ def createstorage(noderange, options, args): sys.stderr.write('-r and -d are required arguments to create array\n') sys.exit(1) session = client.Command() + session.stop_if_noderange_over(noderange, options.maxnodes) names = options.name if names is None: names = ''.join(args) @@ -132,6 +139,7 @@ def deletestorage(noderange, options, args): else: names = options.name session = client.Command() + session.stop_if_noderange_over(noderange, options.maxnodes) for rsp in session.delete( '/noderange/{0}/configuration/storage/volumes/{1}'.format( noderange, names)): @@ -162,6 +170,7 @@ 
def setdisk(noderange, options, args): sys.stderr.write('diskset requires valid state as argument (hotspare, jbod, unconfigured)\n') sys.exit(1) session = client.Command() + session.stop_if_noderange_over(noderange, options.maxnodes) scfg = session.update('/noderange/{0}/configuration/storage/disks/{1}'.format(noderange, names), {'state': args[0]}) _print_cfg(scfg) @@ -202,6 +211,10 @@ def main(): help='Comma separated list of stripsizes to use when creating volumes. ' 'This value is in kilobytes. The default behavior is to allow the ' 'storage controller to decide.') + argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to configure storage on, ' + 'prompting if over the threshold') (options, args) = argparser.parse_args() if len(args) == 1: args.append('show') diff --git a/confluent_client/bin/nodesupport b/confluent_client/bin/nodesupport index 135a16df..545ef85e 100644 --- a/confluent_client/bin/nodesupport +++ b/confluent_client/bin/nodesupport @@ -64,7 +64,7 @@ def printerror(res, node=None): -def download_servicedata(noderange, media): +def download_servicedata(noderange, media, options): global exitcode session = client.Command() output = sq.ScreenPrinter(noderange, session) @@ -73,6 +73,7 @@ def download_servicedata(noderange, media): upargs = {'filename': filename} noderrs = {} nodeurls = {} + session.stop_if_noderange_over(noderange, options.maxnodes) for res in session.create(resource, upargs): if 'created' not in res: for nodename in res.get('databynode', ()): @@ -121,6 +122,10 @@ def main(): 'management server (the confluent server if running remote, ' 'and the collective.manager if in collective)\n' '\n\nSee `man %prog` for more info.\n') + argparser.add_option('-m', '--maxnodes', type='int', + help='Specify a maximum number of ' + 'nodes to download diagnostic data from, ' + 'prompting if over the threshold') (options, args) = argparser.parse_args() media = None try: @@ -142,6 +147,6 @@ def main(): 
except KeyError: argparser.print_help() sys.exit(1) - handler(noderange, media) + handler(noderange, media, options) if __name__ == '__main__': main() diff --git a/confluent_client/confluent/client.py b/confluent_client/confluent/client.py index c70662ec..3ee2712c 100644 --- a/confluent_client/confluent/client.py +++ b/confluent_client/confluent/client.py @@ -39,6 +39,10 @@ _attraliases = { 'bmcpass': 'secret.hardwaremanagementpassword', } +try: + input = raw_input +except NameError: + pass def stringify(instr): # Normalize unicode and bytes to 'str', correcting for @@ -219,7 +223,7 @@ class Command(object): return rc def simple_noderange_command(self, noderange, resource, input=None, - key=None, errnodes=None, **kwargs): + key=None, errnodes=None, promptover=None, **kwargs): try: self._currnoderange = noderange rc = 0 @@ -235,6 +239,7 @@ class Command(object): noderange, resource)): rc = self.handle_results(ikey, rc, res, errnodes) else: + self.stop_if_noderange_over(noderange, promptover) kwargs[ikey] = input for res in self.update('/noderange/{0}/{1}'.format( noderange, resource), kwargs): @@ -244,6 +249,33 @@ class Command(object): except KeyboardInterrupt: cprint('') return 0 + + def stop_if_noderange_over(self, noderange, maxnodes): + if maxnodes is None: + return + nsize = self.get_noderange_size(noderange) + if nsize > maxnodes: + if nsize == 1: + nodename = list(self.read( + '/noderange/{0}/nodes/'.format(noderange)))[0].get('item', {}).get('href', None) + nodename = nodename[:-1] + p = input('Command is about to affect node {0}, continue (y/n)? '.format(nodename)) + else: + p = input('Command is about to affect {0} nodes, continue (y/n)? 
'.format(nsize)) + if p.lower() != 'y': + sys.stderr.write('Aborting at user request\n') + sys.exit(1) + raise Exception("Aborting at user request") + + + def get_noderange_size(self, noderange): + numnodes = 0 + for node in self.read('/noderange/{0}/nodes/'.format(noderange)): + if node.get('item', {}).get('href', None): + numnodes += 1 + else: + raise Exception("Error trying to size noderange {0}".format(noderange)) + return numnodes def simple_nodegroups_command(self, noderange, resource, input=None, key=None, **kwargs): try: @@ -344,12 +376,8 @@ class Command(object): if fingerprint == khf[hostid]: return else: - try: - replace = raw_input( - "MISMATCHED CERTIFICATE DATA, ACCEPT NEW? (y/n):") - except NameError: - replace = input( - "MISMATCHED CERTIFICATE DATA, ACCEPT NEW? (y/n):") + replace = input( + "MISMATCHED CERTIFICATE DATA, ACCEPT NEW? (y/n):") if replace not in ('y', 'Y'): raise Exception("BAD CERTIFICATE") cprint('Adding new key for %s:%s' % (server, port)) @@ -401,6 +429,10 @@ def printattributes(session, requestargs, showtype, nodetype, noderange, options path = '/{0}/{1}/attributes/{2}'.format(nodetype, noderange, showtype) return print_attrib_path(path, session, requestargs, options) +def _sort_attrib(k): + if isinstance(k[1], dict) and k[1].get('sortid', None) is not None: + return k[1]['sortid'] + return k[0] def print_attrib_path(path, session, requestargs, options, rename=None, attrprefix=None): exitcode = 0 @@ -411,9 +443,7 @@ def print_attrib_path(path, session, requestargs, options, rename=None, attrpref exitcode = 1 continue for node in sorted(res['databynode']): - for attr, val in sorted( - res['databynode'][node].items(), - key=lambda k: k[1].get('sortid', k[0]) if isinstance(k[1], dict) else k[0]): + for attr, val in sorted(res['databynode'][node].items(), key=_sort_attrib): if attr == 'error': sys.stderr.write('{0}: Error: {1}\n'.format(node, val)) continue diff --git a/confluent_client/confluent_env.sh 
b/confluent_client/confluent_env.sh index 090b2fc9..f9b06644 100644 --- a/confluent_client/confluent_env.sh +++ b/confluent_client/confluent_env.sh @@ -46,9 +46,8 @@ alias nodelicense='CURRENT_CMDLINE=$(HISTTIMEFORMAT= builtin history 1); export _confluent_get_args() { CMPARGS=($COMP_LINE) - NUMARGS=${#CMPARGS[@]} - if [ "${COMP_WORDS[-1]}" == '' ]; then - NUMARGS=$((NUMARGS+1)) + NUMARGS=$((COMP_CWORD+1)) + if [ "${COMP_WORDS[COMP_CWORD]}" == '' ]; then CMPARGS+=("") fi GENNED="" @@ -75,7 +74,7 @@ function _confluent_generic_completion() { _confluent_get_args if [ $NUMARGS -ge 3 ] && [ ! -z "$GENNED" ]; then - COMPREPLY=($(compgen -W "$GENNED" -- ${COMP_WORDS[-1]})) + COMPREPLY=($(compgen -W "$GENNED" -- ${COMP_WORDS[COMP_CWORD]})) fi if [ $NUMARGS -lt 3 ]; then _confluent_nr_completion @@ -111,7 +110,7 @@ _confluent_nodemedia_completion() return fi if [ $NUMARGS -ge 3 ] && [ ! -z "$GENNED" ]; then - COMPREPLY=($(compgen -W "$GENNED" -- ${COMP_WORDS[-1]})) + COMPREPLY=($(compgen -W "$GENNED" -- ${COMP_WORDS[COMP_CWORD]})) return; fi if [ $NUMARGS -lt 3 ]; then @@ -124,7 +123,7 @@ _confluent_nodefirmware_completion() { _confluent_get_args if [ $NUMARGS == 3 ]; then - COMPREPLY=($(compgen -W "list update" -- ${COMP_WORDS[-1]})) + COMPREPLY=($(compgen -W "list update" -- ${COMP_WORDS[COMP_CWORD]})) return; fi if [ $NUMARGS -gt 3 ] && [ ${CMPARGS[2]} == 'update' ]; then @@ -142,7 +141,7 @@ _confluent_nodeshell_completion() { _confluent_get_args if [ $NUMARGS == 3 ]; then - COMPREPLY=($(compgen -c -- ${COMP_WORDS[-1]})) + COMPREPLY=($(compgen -c -- ${COMP_WORDS[COMP_CWORD]})) return fi if [ $NUMARGS -gt 3 ]; then @@ -160,7 +159,7 @@ _confluent_nodelicense_completion() { _confluent_get_args if [ $NUMARGS == 3 ]; then - COMPREPLY=($(compgen -W "install list save delete" -- ${COMP_WORDS[-1]})) + COMPREPLY=($(compgen -W "install list save delete" -- ${COMP_WORDS[COMP_CWORD]})) return; fi if [ $NUMARGS == 4 ] && [ ${CMPARGS[2]} == 'install' ]; then @@ -183,7 +182,7 @@ 
_confluent_nodesupport_completion() { _confluent_get_args if [ $NUMARGS == 3 ]; then - COMPREPLY=($(compgen -W "servicedata" -- ${COMP_WORDS[-1]})) + COMPREPLY=($(compgen -W "servicedata" -- ${COMP_WORDS[COMP_CWORD]})) return; fi if [ $NUMARGS == 4 ] && [ ${CMPARGS[2]} == 'servicedata' ]; then @@ -210,41 +209,36 @@ _confluent_nn_completion() if [ $NUMARGS -gt 2 ]; then return; fi - INPUT=${COMP_WORDS[-1]} + INPUT=${COMP_WORDS[COMP_CWORD]} INPUT=${INPUT##*,-} INPUT=${INPUT##*,} INPUT=${INPUT##*@} PREFIX="" - if [ "$INPUT" != "${COMP_WORDS[-1]}" ]; then - PREFIX=${COMP_WORDS[-1]} + if [ "$INPUT" != "${COMP_WORDS[COMP_CWORD]}" ]; then + PREFIX=${COMP_WORDS[COMP_CWORD]} PREFIX=$(echo $PREFIX | sed -e 's/,[^,@-]*$/,/' -e 's/,-[^,@]*$/,-/' -e 's/@[^,@]*/@/') fi - COMPREPLY=($(compgen -W "$(nodelist | sed -e s/^/$PREFIX/)" -- "${COMP_WORDS[-1]}")) + COMPREPLY=($(compgen -W "$(nodelist | sed -e s/^/$PREFIX/)" -- "${COMP_WORDS[COMP_CWORD]}")) } _confluent_nr_completion() { CMPARGS=($COMP_LINE) - NUMARGS=${#CMPARGS[@]} - if [ "${COMP_WORDS[-1]}" == '' ]; then - NUMARGS=$((NUMARGS+1)) - fi _confluent_get_args if [ $NUMARGS -gt 2 ]; then return; fi - INPUT=${COMP_WORDS[-1]} + INPUT=${COMP_WORDS[COMP_CWORD]} INPUT=${INPUT##*,-} INPUT=${INPUT##*,} INPUT=${INPUT##*@} PREFIX="" - if [ "$INPUT" != "${COMP_WORDS[-1]}" ]; then - PREFIX=${COMP_WORDS[-1]} + if [ "$INPUT" != "${COMP_WORDS[COMP_CWORD]}" ]; then + PREFIX=${COMP_WORDS[COMP_CWORD]} PREFIX=$(echo $PREFIX | sed -e 's/,[^,@-]*$/,/' -e 's/,-[^,@]*$/,-/' -e 's/@[^,@]*/@/') fi - #COMPREPLY=($(compgen -W "$(confetty show /nodegroups|sed -e 's/\///' -e s/^/$PREFIX/;nodelist | sed -e s/^/$PREFIX/)" -- "${COMP_WORDS[-1]}")) - COMPREPLY=($(compgen -W "$(confetty show /nodegroups|sed -e 's/\///' -e s/^/$PREFIX/;nodelist | sed -e s/^/$PREFIX/)" -- "${COMP_WORDS[-1]}")) + COMPREPLY=($(compgen -W "$(confetty show /nodegroups|sed -e 's/\///' -e s/^/$PREFIX/;nodelist | sed -e s/^/$PREFIX/)" -- "${COMP_WORDS[COMP_CWORD]}")) } 
_confluent_ng_completion() { @@ -252,17 +246,17 @@ _confluent_ng_completion() if [ $NUMARGS -gt 2 ]; then return; fi - INPUT=${COMP_WORDS[-1]} + INPUT=${COMP_WORDS[COMP_CWORD]} INPUT=${INPUT##*,-} INPUT=${INPUT##*,} INPUT=${INPUT##*@} PREFIX="" - if [ "$INPUT" != "${COMP_WORDS[-1]}" ]; then - PREFIX=${COMP_WORDS[-1]} + if [ "$INPUT" != "${COMP_WORDS[COMP_CWORD]}" ]; then + PREFIX=${COMP_WORDS[COMP_CWORD]} PREFIX=$(echo $PREFIX | sed -e 's/,[^,@-]*$/,/' -e 's/,-[^,@]*$/,-/' -e 's/@[^,@]*/@/') fi - COMPREPLY=($(compgen -W "$(confetty show /nodegroups|sed -e 's/\///' -e s/^/$PREFIX/)" -- "${COMP_WORDS[-1]}")) + COMPREPLY=($(compgen -W "$(confetty show /nodegroups|sed -e 's/\///' -e s/^/$PREFIX/)" -- "${COMP_WORDS[COMP_CWORD]}")) } complete -F _confluent_nodeattrib_completion nodeattrib complete -F _confluent_nodeattrib_completion nodegroupattrib diff --git a/confluent_client/doc/man/nodeconfig.ronn b/confluent_client/doc/man/nodeconfig.ronn index d504fa5f..5bbdc30a 100644 --- a/confluent_client/doc/man/nodeconfig.ronn +++ b/confluent_client/doc/man/nodeconfig.ronn @@ -22,6 +22,11 @@ given as a node expression, as documented in the man page for nodeattribexpressi If combined with `-x`, will show all differing values except those indicated by `-x` +* `-e`, `--extra`: + Read settings that are generally not needed, but may be slow to retrieve. + Notably this includes the IMM category of Lenovo systems. The most popular + IMM settings are available through faster 'bmc' attributes. 
+ * `-x`, `--exclude`: Rather than listing only the specified configuration parameters, list all attributes except for the specified ones diff --git a/confluent_client/doc/man/nodefirmware.ronn b/confluent_client/doc/man/nodefirmware.ronn index 6a9a60b0..12234686 100644 --- a/confluent_client/doc/man/nodefirmware.ronn +++ b/confluent_client/doc/man/nodefirmware.ronn @@ -13,7 +13,11 @@ nodefirmware(8) -- Report firmware information on confluent nodes will retrieve all firmware, but can be directed to fetch specific firmware by calling out the name of the firmware (e.g. uefi or xcc) or request reading only core firmware firmware by using the word 'core', which is generally a quicker -operation. +operation. Plugins indicated by hardwaremanagement.method may have +different capabilities available. For example, the 'core' distinction may +not be relevant to redfish. Additionally, the Lenovo XCC makes certain +information available over IPMI that is not otherwise available (for example +the FPGA version where applicable). In the update form, it accepts a single file and attempts to update it using the out of band facilities. Firmware updates can end in one of three states: diff --git a/confluent_client/doc/man/nodesupport.ronn index ee80bf67..a8d93267 100644 --- a/confluent_client/doc/man/nodesupport.ronn +++ b/confluent_client/doc/man/nodesupport.ronn @@ -7,7 +7,7 @@ nodesupport(8) -- Utilities for interacting with vendor support ## DESCRIPTION -`nodesupport` provides capabilities associated with interactiong with support. +`nodesupport` provides capabilities associated with interacting with support. Currently it only has the `servicedata` subcommand. `servicedata` takes an argument that is either a directory name (that can be used for a single node or multiple nodes) or a file name (only to be used with single node noderange). 
@@ -16,6 +16,9 @@ connects to the managed system, so it will download to the remote system if runn remotely and will download to the collective.manager indicated system if running in collective mode. +Note that due to vendor filename requirements, any filename may have vendor +specific suffixes added to any file produced. + ## EXAMPLES * Download support data from a single node to a specific filename diff --git a/confluent_server/bin/collective b/confluent_server/bin/collective index fee56ab5..e993e168 100644 --- a/confluent_server/bin/collective +++ b/confluent_server/bin/collective @@ -88,6 +88,9 @@ def show_collective(): s = client.Command().connection tlvdata.send(s, {'collective': {'operation': 'show'}}) res = tlvdata.recv(s) + if 'error' in res: + print(res['error']) + return if 'error' in res['collective']: print(res['collective']['error']) return diff --git a/confluent_server/bin/confluent b/confluent_server/bin/confluent index 62d6ab3d..db66e587 100755 --- a/confluent_server/bin/confluent +++ b/confluent_server/bin/confluent @@ -32,7 +32,7 @@ import confluent.main import multiprocessing if __name__ == '__main__': multiprocessing.freeze_support() - confluent.main.run() + confluent.main.run(sys.argv) #except: # pass #p.disable() diff --git a/confluent_server/bin/confluentcertutil.py b/confluent_server/bin/confluentcertutil.py index c121224a..ea4477cd 100644 --- a/confluent_server/bin/confluentcertutil.py +++ b/confluent_server/bin/confluentcertutil.py @@ -7,7 +7,7 @@ import tempfile def get_openssl_conf_location(): if exists('/etc/pki/tls/openssl.cnf'): return '/etc/pki/tls/openssl.cnf' - elif exists('/etc/ssl/openssl.cnf'); + elif exists('/etc/ssl/openssl.cnf'): return '/etc/ssl/openssl.cnf' else: raise Exception("Cannot find openssl config file") diff --git a/confluent_server/builddeb b/confluent_server/builddeb index fd9a1ae9..e3e2f82a 100755 --- a/confluent_server/builddeb +++ b/confluent_server/builddeb @@ -3,8 +3,12 @@ cd `dirname $0` 
PKGNAME=$(basename $(pwd)) DPKGNAME=$(basename $(pwd) | sed -e s/_/-/) OPKGNAME=$(basename $(pwd) | sed -e s/_/-/) +PYEXEC=python3 +DSCARGS="--with-python3=True --with-python2=False" if grep wheezy /etc/os-release; then DPKGNAME=python-$DPKGNAME + PYEXEC=python + DSCARGS="" fi cd .. mkdir -p /tmp/confluent # $DPKGNAME @@ -24,15 +28,15 @@ install-scripts=/opt/confluent/bin package=$DPKGNAME EOF -python setup.py sdist > /dev/null 2>&1 -py2dsc dist/*.tar.gz +$PYEXEC setup.py sdist > /dev/null 2>&1 +py2dsc $DSCARGS dist/*.tar.gz shopt -s extglob cd deb_dist/!(*.orig)/ if [ "$OPKGNAME" = "confluent-server" ]; then if grep wheezy /etc/os-release; then sed -i 's/^\(Depends:.*\)/\1, python-confluent-client, python-lxml, python-eficompressor, python-pycryptodomex, python-dateutil, python-pyopenssl/' debian/control else - sed -i 's/^\(Depends:.*\)/\1, confluent-client, python-lxml, python-eficompressor, python-pycryptodome, python-dateutil/' debian/control + sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket/' debian/control fi if grep wheezy /etc/os-release; then echo 'confluent_client python-confluent-client' >> debian/pydist-overrides diff --git a/confluent_server/confluent/auth.py b/confluent_server/confluent/auth.py index bd0ef5cb..05eeacb6 100644 --- a/confluent_server/confluent/auth.py +++ b/confluent_server/confluent/auth.py @@ -27,6 +27,8 @@ from fnmatch import fnmatch import hashlib import hmac import multiprocessing +import os +import pwd import confluent.userutil as userutil import confluent.util as util pam = None @@ -58,9 +60,9 @@ _allowedbyrole = { '/node*/configuration/*', ], 'update': [ - '/discovery/*', + '/discovery/*', '/networking/macs/rescan', - '/node*/power/state', + '/node*/power/state', '/node*/power/reseat', '/node*/attributes/*', '/node*/media/*tach', @@ -98,17 +100,6 @@ _deniedbyrole = { } -def _prune_passcache(): - # This function makes sure we don't remember a passphrase 
in memory more - # than 10 seconds - while True: - curtime = time.time() - for passent in _passcache.iterkeys(): - if passent[2] < curtime - 90: - del _passcache[passent] - eventlet.sleep(90) - - def _get_usertenant(name, tenant=False): """_get_usertenant @@ -268,12 +259,36 @@ def check_user_passphrase(name, passphrase, operation=None, element=None, tenant _passcache[(user, tenant)] = hashlib.sha256(passphrase).digest() return authorize(user, element, tenant, operation) if pam: - pammy = pam.pam() - usergood = pammy.authenticate(user, passphrase) - del pammy + pwe = None + try: + pwe = pwd.getpwnam(user) + except KeyError: + #pam won't work if the user doesn't exist, don't go further + eventlet.sleep(0.05) # stall even on test for existence of a username + return None + if os.getuid() != 0: + # confluent is running with reduced privilege, however, pam_unix refuses + # to let a non-0 user check anothers password. + # We will fork and the child will assume elevated privilege to + # get unix_chkpwd helper to enable checking /etc/shadow + pid = os.fork() + if not pid: + usergood = False + try: + # we change to the uid we are trying to authenticate as, because + # pam_unix uses unix_chkpwd which reque + os.setuid(pwe.pw_uid) + usergood = pam.authenticate(user, passphrase, service=_pamservice) + finally: + os._exit(0 if usergood else 1) + usergood = os.waitpid(pid, 0)[1] == 0 + else: + # We are running as root, we don't need to fork in order to authenticate the + # user + usergood = pam.authenticate(user, passphrase, service=_pamservice) if usergood: _passcache[(user, tenant)] = hashlib.sha256(passphrase).digest() - return authorize(user, element, tenant, operation, skipuserobj=False) + return authorize(user, element, tenant, operation, skipuserobj=False) eventlet.sleep(0.05) # stall even on test for existence of a username return None diff --git a/confluent_server/confluent/collective/manager.py b/confluent_server/confluent/collective/manager.py index c1d20427..7a032203 
100644 --- a/confluent_server/confluent/collective/manager.py +++ b/confluent_server/confluent/collective/manager.py @@ -73,20 +73,12 @@ def connect_to_leader(cert=None, name=None, leader=None): with cfm._initlock: banner = tlvdata.recv(remote) # the banner vers = banner.split()[2] - pvers = 0 - reqver = 4 - if vers == b'v0': - pvers = 2 - elif vers == b'v1': - pvers = 4 - if sys.version_info[0] < 3: - pvers = 2 - reqver = 2 + if vers != b'v2': + raise Exception('This instance only supports protocol 2, synchronize versions between collective members') tlvdata.recv(remote) # authpassed... 0.. if name is None: name = get_myname() tlvdata.send(remote, {'collective': {'operation': 'connect', - 'protover': reqver, 'name': name, 'txcount': cfm._txcount}}) keydata = tlvdata.recv(remote) @@ -160,15 +152,15 @@ def connect_to_leader(cert=None, name=None, leader=None): raise currentleader = leader #spawn this as a thread... - follower = eventlet.spawn(follow_leader, remote, pvers) + follower = eventlet.spawn(follow_leader, remote, leader) return True -def follow_leader(remote, proto): +def follow_leader(remote, leader): global currentleader cleanexit = False try: - cfm.follow_channel(remote, proto) + cfm.follow_channel(remote) except greenlet.GreenletExit: cleanexit = True finally: @@ -176,8 +168,8 @@ def follow_leader(remote, proto): log.log({'info': 'Previous following cleanly closed', 'subsystem': 'collective'}) return - log.log({'info': 'Current leader has disappeared, restarting ' - 'collective membership', 'subsystem': 'collective'}) + log.log({'info': 'Current leader ({0}) has disappeared, restarting ' + 'collective membership'.format(leader), 'subsystem': 'collective'}) # The leader has folded, time to startup again... 
cfm.stop_following() currentleader = None @@ -430,7 +422,6 @@ def handle_connection(connection, cert, request, local=False): tlvdata.send(connection, collinfo) if 'connect' == operation: drone = request['name'] - folver = request.get('protover', 2) droneinfo = cfm.get_collective_member(drone) if not (droneinfo and util.cert_matches(droneinfo['fingerprint'], cert)): @@ -479,7 +470,7 @@ def handle_connection(connection, cert, request, local=False): connection.sendall(cfgdata) #tlvdata.send(connection, {'tenants': 0}) # skip the tenants for now, # so far unused anyway - if not cfm.relay_slaved_requests(drone, connection, folver): + if not cfm.relay_slaved_requests(drone, connection): if not retrythread: # start a recovery if everyone else seems # to have disappeared retrythread = eventlet.spawn_after(30 + random.random(), diff --git a/confluent_server/confluent/config/attributes.py b/confluent_server/confluent/config/attributes.py index c2298ce2..52cc43ee 100644 --- a/confluent_server/confluent/config/attributes.py +++ b/confluent_server/confluent/config/attributes.py @@ -280,7 +280,7 @@ node = { 'console.method': { 'description': ('Indicate the method used to access the console of ' 'the managed node.'), - 'validvalues': ('ssh', 'ipmi'), + 'validvalues': ('ssh', 'ipmi', 'tsmsol'), }, # 'virtualization.host': { # 'description': ('Hypervisor where this node does/should reside'), diff --git a/confluent_server/confluent/config/configmanager.py b/confluent_server/confluent/config/configmanager.py index 0fbdcf7d..440eaf6c 100644 --- a/confluent_server/confluent/config/configmanager.py +++ b/confluent_server/confluent/config/configmanager.py @@ -71,6 +71,7 @@ import eventlet.green.select as select import eventlet.green.threading as gthread import fnmatch import json +import msgpack import operator import os import random @@ -101,10 +102,6 @@ _cfgstore = None _pendingchangesets = {} _txcount = 0 _hasquorum = True -if sys.version_info[0] >= 3: - lowestver = 4 -else: - 
lowestver = 2 _attraliases = { 'bmc': 'hardwaremanagement.manager', @@ -317,8 +314,8 @@ def exec_on_leader(function, *args): while xid in _pendingchangesets: xid = confluent.util.stringify(base64.b64encode(os.urandom(8))) _pendingchangesets[xid] = event.Event() - rpcpayload = cPickle.dumps({'function': function, 'args': args, - 'xid': xid}, protocol=cfgproto) + rpcpayload = msgpack.packb({'function': function, 'args': args, + 'xid': xid}, use_bin_type=False) rpclen = len(rpcpayload) cfgleader.sendall(struct.pack('!Q', rpclen)) cfgleader.sendall(rpcpayload) @@ -328,6 +325,11 @@ def exec_on_leader(function, *args): def exec_on_followers(fnname, *args): + pushes = eventlet.GreenPool() + # Check health of collective prior to attempting + for _ in pushes.starmap( + _push_rpc, [(cfgstreams[s], b'') for s in cfgstreams]): + pass if len(cfgstreams) < (len(_cfgstore['collective']) // 2): # the leader counts in addition to registered streams raise exc.DegradedCollective() @@ -338,8 +340,8 @@ def exec_on_followers_unconditional(fnname, *args): global _txcount pushes = eventlet.GreenPool() _txcount += 1 - payload = cPickle.dumps({'function': fnname, 'args': args, - 'txcount': _txcount}, protocol=lowestver) + payload = msgpack.packb({'function': fnname, 'args': args, + 'txcount': _txcount}, use_bin_type=False) for _ in pushes.starmap( _push_rpc, [(cfgstreams[s], payload) for s in cfgstreams]): pass @@ -386,9 +388,15 @@ def init_masterkey(password=None, autogen=True): def _push_rpc(stream, payload): with _rpclock: - stream.sendall(struct.pack('!Q', len(payload))) - if len(payload): - stream.sendall(payload) + try: + stream.sendall(struct.pack('!Q', len(payload))) + if len(payload): + stream.sendall(payload) + return True + except Exception: + logException() + del cfgstreams[stream] + stream.close() def decrypt_value(cryptvalue, @@ -554,14 +562,9 @@ def set_global(globalname, value, sync=True): ConfigManager._bg_sync_to_file() cfgstreams = {} -def relay_slaved_requests(name, 
listener, vers): +def relay_slaved_requests(name, listener): global cfgleader global _hasquorum - global lowestver - if vers > 2 and sys.version_info[0] < 3: - vers = 2 - if vers < lowestver: - lowestver = vers pushes = eventlet.GreenPool() if name not in _followerlocks: _followerlocks[name] = gthread.RLock() @@ -578,11 +581,18 @@ def relay_slaved_requests(name, listener, vers): lh = StreamHandler(listener) _hasquorum = len(cfgstreams) >= ( len(_cfgstore['collective']) // 2) - payload = cPickle.dumps({'quorum': _hasquorum}, protocol=lowestver) - for _ in pushes.starmap( - _push_rpc, - [(cfgstreams[s], payload) for s in cfgstreams]): - pass + _newquorum = None + while _hasquorum != _newquorum: + if _newquorum is not None: + _hasquorum = _newquorum + payload = msgpack.packb({'quorum': _hasquorum}, use_bin_type=False) + for _ in pushes.starmap( + _push_rpc, + [(cfgstreams[s], payload) for s in cfgstreams]): + pass + _newquorum = len(cfgstreams) >= ( + len(_cfgstore['collective']) // 2) + _hasquorum = _newquorum if _hasquorum and _pending_collective_updates: apply_pending_collective_updates() msg = lh.get_next_msg() @@ -597,15 +607,21 @@ def relay_slaved_requests(name, listener, vers): if not nrpc: raise Exception('Truncated client error') rpc += nrpc - rpc = cPickle.loads(rpc) + rpc = msgpack.unpackb(rpc, raw=False) exc = None + if not (rpc['function'].startswith('_rpc_') or rpc['function'].endswith('_collective_member')): + raise Exception('Unsupported function {0} called'.format(rpc['function'])) try: globals()[rpc['function']](*rpc['args']) + except ValueError as ve: + exc = ['ValueError', str(ve)] except Exception as e: - exc = e + exc = ['Exception', str(e)] if 'xid' in rpc: - _push_rpc(listener, cPickle.dumps({'xid': rpc['xid'], - 'exc': exc}, protocol=vers)) + res = _push_rpc(listener, msgpack.packb({'xid': rpc['xid'], + 'exc': exc}, use_bin_type=False)) + if not res: + break try: msg = lh.get_next_msg() except Exception: @@ -622,7 +638,7 @@ def 
relay_slaved_requests(name, listener, vers): if cfgstreams: _hasquorum = len(cfgstreams) >= ( len(_cfgstore['collective']) // 2) - payload = cPickle.dumps({'quorum': _hasquorum}, protocol=lowestver) + payload = msgpack.packb({'quorum': _hasquorum}, use_bin_type=False) for _ in pushes.starmap( _push_rpc, [(cfgstreams[s], payload) for s in cfgstreams]): @@ -650,7 +666,9 @@ class StreamHandler(object): if confluent.util.monotonic_time() > self.expiry: return None if confluent.util.monotonic_time() > self.keepalive: - _push_rpc(self.sock, b'') # nulls are a keepalive + res = _push_rpc(self.sock, b'') # nulls are a keepalive + if not res: + return None self.keepalive = confluent.util.monotonic_time() + 20 self.expiry = confluent.util.monotonic_time() + 60 msg = self.sock.recv(8) @@ -662,19 +680,15 @@ class StreamHandler(object): self.sock = None -def stop_following(replacement=None, proto=2): +def stop_following(replacement=None): with _leaderlock: global cfgleader - global cfgproto if cfgleader and not isinstance(cfgleader, bool): try: cfgleader.close() except Exception: pass cfgleader = replacement - if proto > 2 and sys.version_info[0] < 3: - proto = 2 - cfgproto = proto def stop_leading(): for stream in list(cfgstreams): @@ -732,15 +746,14 @@ def commit_clear(): ConfigManager._bg_sync_to_file() cfgleader = None -cfgproto = 2 -def follow_channel(channel, proto=2): +def follow_channel(channel): global _txcount global _hasquorum try: stop_leading() - stop_following(channel, proto) + stop_following(channel) lh = StreamHandler(channel) msg = lh.get_next_msg() while msg: @@ -752,22 +765,31 @@ def follow_channel(channel, proto=2): if not nrpc: raise Exception('Truncated message error') rpc += nrpc - rpc = cPickle.loads(rpc) + rpc = msgpack.unpackb(rpc, raw=False) if 'txcount' in rpc: _txcount = rpc['txcount'] if 'function' in rpc: + if not (rpc['function'].startswith('_true') or rpc['function'].startswith('_rpc')): + raise Exception("Received unsupported function call: 
{0}".format(rpc['function'])) try: globals()[rpc['function']](*rpc['args']) except Exception as e: print(repr(e)) if 'xid' in rpc and rpc['xid']: if rpc.get('exc', None): - _pendingchangesets[rpc['xid']].send_exception(rpc['exc']) + exctype, excstr = rpc['exc'] + if exctype == 'ValueError': + exc = ValueError(excstr) + else: + exc = Exception(excstr) + _pendingchangesets[rpc['xid']].send_exception(exc) else: _pendingchangesets[rpc['xid']].send() if 'quorum' in rpc: _hasquorum = rpc['quorum'] - _push_rpc(channel, b'') # use null as ACK + res = _push_rpc(channel, b'') # use null as ACK + if not res: + break msg = lh.get_next_msg() finally: # mark the connection as broken @@ -1137,6 +1159,8 @@ class ConfigManager(object): attribute, match = expression.split('=') else: raise Exception('Invalid Expression') + if attribute.startswith('secret.'): + raise Exception('Filter by secret attributes is not supported') for node in nodes: try: currvals = [self._cfgstore['nodes'][node][attribute]['value']] @@ -1381,7 +1405,7 @@ class ConfigManager(object): return exec_on_leader('_rpc_master_set_user', self.tenant, name, attributemap) if cfgstreams: - exec_on_followers('_rpc_set_user', self.tenant, name) + exec_on_followers('_rpc_set_user', self.tenant, name, attributemap) self._true_set_user(name, attributemap) def _true_set_user(self, name, attributemap): @@ -1461,9 +1485,10 @@ class ConfigManager(object): self._cfgstore['users'][name]['displayname'] = displayname _cfgstore['main']['idmap'][uid] = { 'tenant': self.tenant, - 'username': name + 'username': name, + 'role': role, } - if attributemap is not None: + if attributemap: self._true_set_user(name, attributemap) _mark_dirtykey('users', name, self.tenant) _mark_dirtykey('idmap', uid) @@ -1626,6 +1651,7 @@ class ConfigManager(object): if group in self._cfgstore['nodes'][node]['groups']: self._cfgstore['nodes'][node]['groups'].remove(group) self._node_removed_from_group(node, group, changeset) + _mark_dirtykey('nodes', node, 
self.tenant) for node in nodes: if node not in self._cfgstore['nodes']: self._cfgstore['nodes'][node] = {'groups': [group]} @@ -1867,6 +1893,8 @@ class ConfigManager(object): eventlet.spawn_n(_do_notifier, self, watcher, callback) def del_nodes(self, nodes): + if isinstance(nodes, set): + nodes = list(nodes) # msgpack can't handle set if cfgleader: # slaved to a collective return exec_on_leader('_rpc_master_del_nodes', self.tenant, nodes) diff --git a/confluent_server/confluent/consoleserver.py b/confluent_server/confluent/consoleserver.py index 51853100..dbc12e43 100644 --- a/confluent_server/confluent/consoleserver.py +++ b/confluent_server/confluent/consoleserver.py @@ -362,13 +362,17 @@ class ConsoleHandler(object): if self.reconnect: self.reconnect.cancel() self.reconnect = None + strerror = ('The console.method attribute for this node is ' + 'not configured,\r\nset it to a valid value for console ' + 'function') try: self._console = list(plugin.handle_path( self._plugin_path.format(self.node), "create", self.cfgmgr))[0] except (exc.NotImplementedException, exc.NotFoundException): self._console = None - except: + except Exception as e: + strerror = str(e) if _tracelog: _tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event, event=log.Events.stacktrace) @@ -381,13 +385,9 @@ class ConsoleHandler(object): self._send_rcpts({'connectstate': self.connectstate, 'error': self.error}) self.feedbuffer( - '\x1bc\x1b[2J\x1b[1;1H[The console.method attribute for this node is ' - 'not configured,\r\nset it to a valid value for console ' - 'function]') + '\x1bc\x1b[2J\x1b[1;1H[{0}]'.format(strerror)) self._send_rcpts( - '\x1bc\x1b[2J\x1b[1;1H[The console.method attribute for this node is ' - 'not configured,\r\nset it to a valid value for console ' - 'function]') + '\x1bc\x1b[2J\x1b[1;1H[{0}]'.format(strerror)) self.clearerror = True return if self.clearerror: diff --git a/confluent_server/confluent/core.py b/confluent_server/confluent/core.py index 
1b17da17..997781cd 100644 --- a/confluent_server/confluent/core.py +++ b/confluent_server/confluent/core.py @@ -60,19 +60,14 @@ import eventlet.greenpool as greenpool import eventlet.green.ssl as ssl import eventlet.queue as queue import itertools +import msgpack import os -try: - import cPickle as pickle - pargs = {} -except ImportError: - import pickle - pargs = {'encoding': 'utf-8'} import socket import struct import sys pluginmap = {} -dispatch_plugins = (b'ipmi', u'ipmi') +dispatch_plugins = (b'ipmi', u'ipmi', b'redfish', u'redfish', b'tsmsol', u'tsmsol') def seek_element(currplace, currkey): @@ -221,10 +216,14 @@ def _init_core(): 'pluginattrs': ['hardwaremanagement.method'], 'default': 'ipmi', }), + 'extra': PluginRoute({ + 'pluginattrs': ['hardwaremanagement.method'], + 'default': 'ipmi', + }), 'advanced': PluginRoute({ 'pluginattrs': ['hardwaremanagement.method'], 'default': 'ipmi', - }), + }), }, }, 'storage': { @@ -417,18 +416,22 @@ def create_user(inputdata, configmanager): try: username = inputdata['name'] del inputdata['name'] + role = inputdata['role'] + del inputdata['role'] except (KeyError, ValueError): - raise exc.InvalidArgumentException() - configmanager.create_user(username, attributemap=inputdata) + raise exc.InvalidArgumentException('Missing user name or role') + configmanager.create_user(username, role, attributemap=inputdata) def create_usergroup(inputdata, configmanager): try: groupname = inputdata['name'] + role = inputdata['role'] del inputdata['name'] + del inputdata['role'] except (KeyError, ValueError): - raise exc.InvalidArgumentException() - configmanager.create_usergroup(groupname) + raise exc.InvalidArgumentException("Missing user name or role") + configmanager.create_usergroup(groupname, role) def update_usergroup(groupname, attribmap, configmanager): @@ -689,16 +692,22 @@ def handle_dispatch(connection, cert, dispatch, peername): cfm.get_collective_member(peername)['fingerprint'], cert): connection.close() return - pversion = 0 
- if bytearray(dispatch)[0] == 0x80: - pversion = bytearray(dispatch)[1] - dispatch = pickle.loads(dispatch, **pargs) + if dispatch[0:2] != b'\x01\x03': # magic value to indicate msgpack + # We only support msgpack now + # The magic should preclude any pickle, as the first byte can never be + # under 0x20 or so. + connection.close() + return + dispatch = msgpack.unpackb(dispatch[2:], raw=False) configmanager = cfm.ConfigManager(dispatch['tenant']) nodes = dispatch['nodes'] inputdata = dispatch['inputdata'] operation = dispatch['operation'] pathcomponents = dispatch['path'] routespec = nested_lookup(noderesources, pathcomponents) + inputdata = msg.get_input_message( + pathcomponents, operation, inputdata, nodes, dispatch['isnoderange'], + configmanager) plugroute = routespec.routeinfo plugpath = None nodesbyhandler = {} @@ -728,18 +737,26 @@ def handle_dispatch(connection, cert, dispatch, peername): configmanager=configmanager, inputdata=inputdata)) for res in itertools.chain(*passvalues): - _forward_rsp(connection, res, pversion) + _forward_rsp(connection, res) except Exception as res: - _forward_rsp(connection, res, pversion) + _forward_rsp(connection, res) connection.sendall('\x00\x00\x00\x00\x00\x00\x00\x00') -def _forward_rsp(connection, res, pversion): +def _forward_rsp(connection, res): try: - r = pickle.dumps(res, protocol=pversion) - except TypeError: - r = pickle.dumps(Exception( - 'Cannot serialize error, check collective.manager error logs for details' + str(res)), protocol=pversion) + r = res.serialize() + except AttributeError: + if isinstance(res, Exception): + r = msgpack.packb(['Exception', str(res)], use_bin_type=False) + else: + r = msgpack.packb( + ['Exception', 'Unable to serialize response ' + repr(res)], + use_bin_type=False) + except Exception as e: + r = msgpack.packb( + ['Exception', 'Unable to serialize response ' + repr(res) + ' due to ' + str(e)], + use_bin_type=False) rlen = len(r) if not rlen: return @@ -830,7 +847,7 @@ def 
handle_node_request(configmanager, inputdata, operation, del pathcomponents[0:2] passvalues = queue.Queue() plugroute = routespec.routeinfo - inputdata = msg.get_input_message( + msginputdata = msg.get_input_message( pathcomponents, operation, inputdata, nodes, isnoderange, configmanager) if 'handler' in plugroute: # fixed handler definition, easy enough @@ -841,7 +858,7 @@ def handle_node_request(configmanager, inputdata, operation, passvalue = hfunc( nodes=nodes, element=pathcomponents, configmanager=configmanager, - inputdata=inputdata) + inputdata=msginputdata) if isnoderange: return passvalue elif isinstance(passvalue, console.Console): @@ -894,13 +911,13 @@ def handle_node_request(configmanager, inputdata, operation, workers.spawn(addtoqueue, passvalues, hfunc, {'nodes': nodesbyhandler[hfunc], 'element': pathcomponents, 'configmanager': configmanager, - 'inputdata': inputdata}) + 'inputdata': msginputdata}) for manager in nodesbymanager: numworkers += 1 workers.spawn(addtoqueue, passvalues, dispatch_request, { 'nodes': nodesbymanager[manager], 'manager': manager, 'element': pathcomponents, 'configmanager': configmanager, - 'inputdata': inputdata, 'operation': operation}) + 'inputdata': inputdata, 'operation': operation, 'isnoderange': isnoderange}) if isnoderange or not autostrip: return iterate_queue(numworkers, passvalues) else: @@ -944,7 +961,7 @@ def addtoqueue(theq, fun, kwargs): def dispatch_request(nodes, manager, element, configmanager, inputdata, - operation): + operation, isnoderange): a = configmanager.get_collective_member(manager) try: remote = socket.create_connection((a['address'], 13001)) @@ -978,10 +995,10 @@ def dispatch_request(nodes, manager, element, configmanager, inputdata, pvers = 2 tlvdata.recv(remote) myname = collective.get_myname() - dreq = pickle.dumps({'name': myname, 'nodes': list(nodes), - 'path': element,'tenant': configmanager.tenant, - 'operation': operation, 'inputdata': inputdata}, - protocol=pvers) + dreq = b'\x01\x03' + 
msgpack.packb( + {'name': myname, 'nodes': list(nodes), + 'path': element,'tenant': configmanager.tenant, + 'operation': operation, 'inputdata': inputdata, 'isnoderange': isnoderange}, use_bin_type=False) tlvdata.send(remote, {'dispatch': {'name': myname, 'length': len(dreq)}}) remote.sendall(dreq) while True: @@ -1029,11 +1046,13 @@ def dispatch_request(nodes, manager, element, configmanager, inputdata, return rsp += nrsp try: - rsp = pickle.loads(rsp, **pargs) - except UnicodeDecodeError: - rsp = pickle.loads(rsp, encoding='latin1') + rsp = msg.msg_deserialize(rsp) + except Exception: + rsp = exc.deserialize_exc(rsp) if isinstance(rsp, Exception): raise rsp + if not rsp: + raise Exception('Error in cross-collective serialize/deserialze, see remote logs') yield rsp diff --git a/confluent_server/confluent/discovery/core.py b/confluent_server/confluent/discovery/core.py index 43e6cca7..5171fcac 100644 --- a/confluent_server/confluent/discovery/core.py +++ b/confluent_server/confluent/discovery/core.py @@ -107,6 +107,8 @@ nodehandlers = { 'service:management-hardware.Lenovo:lenovo-xclarity-controller': xcc, 'service:management-hardware.IBM:integrated-management-module2': imm, 'pxe-client': pxeh, + 'onie-switch': None, + 'cumulus-switch': None, 'service:io-device.Lenovo:management-module': None, 'service:thinkagile-storage': cpstorage, 'service:lenovo-tsm': tsm, @@ -114,6 +116,8 @@ nodehandlers = { servicenames = { 'pxe-client': 'pxe-client', + 'onie-switch': 'onie-switch', + 'cumulus-switch': 'cumulus-switch', 'service:lenovo-smm': 'lenovo-smm', 'service:management-hardware.Lenovo:lenovo-xclarity-controller': 'lenovo-xcc', 'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2', @@ -124,6 +128,8 @@ servicenames = { servicebyname = { 'pxe-client': 'pxe-client', + 'onie-switch': 'onie-switch', + 'cumulus-switch': 'cumulus-switch', 'lenovo-smm': 'service:lenovo-smm', 'lenovo-xcc': 'service:management-hardware.Lenovo:lenovo-xclarity-controller', 
'lenovo-imm2': 'service:management-hardware.IBM:integrated-management-module2', @@ -951,9 +957,9 @@ def eval_node(cfg, handler, info, nodename, manual=False): # raise exc.InvalidArgumentException(errorstr) # log.log({'error': errorstr}) if encuuid in pending_by_uuid: - pending_by_uuid[encuuid].add(info) + pending_by_uuid[encuuid].append(info) else: - pending_by_uuid[encuuid] = set([info]) + pending_by_uuid[encuuid] = [info] return # We found the real smm, replace the list with the actual smm # to continue @@ -1096,6 +1102,10 @@ def discover_node(cfg, handler, info, nodename, manual): info['discostatus'] = 'discovered' for i in pending_by_uuid.get(curruuid, []): eventlet.spawn_n(_recheck_single_unknown_info, cfg, i) + try: + del pending_by_uuid[curruuid] + except KeyError: + pass return True log.log({'info': 'Detected {0}, but discovery.policy is not set to a ' 'value allowing discovery (open or permissive)'.format( diff --git a/confluent_server/confluent/discovery/handlers/generic.py b/confluent_server/confluent/discovery/handlers/generic.py index 6b11e01e..51c52a1e 100644 --- a/confluent_server/confluent/discovery/handlers/generic.py +++ b/confluent_server/confluent/discovery/handlers/generic.py @@ -68,17 +68,21 @@ class NodeHandler(object): def _savecert(self, certificate): self._fp = certificate return True - + def get_node_credentials(self, nodename, creds, defuser, defpass): user = creds.get(nodename, {}).get( 'secret.hardwaremanagementuser', {}).get('value', None) havecustomcreds = False + if user and not isinstance(user, str): + user = user.decode('utf8') if user is not None and user != defuser: havecustomcreds = True else: user = defuser passwd = creds.get(nodename, {}).get( 'secret.hardwaremanagementpassword', {}).get('value', None) + if passwd and not isinstance(passwd, str): + passwd = passwd.decode('utf8') if passwd is not None and passwd != defpass: havecustomcreds = True else: diff --git a/confluent_server/confluent/discovery/handlers/smm.py 
b/confluent_server/confluent/discovery/handlers/smm.py index cb976cba..8f6564ac 100644 --- a/confluent_server/confluent/discovery/handlers/smm.py +++ b/confluent_server/confluent/discovery/handlers/smm.py @@ -98,7 +98,7 @@ class NodeHandler(bmchandler.NodeHandler): setdata += ',v4Gateway:{0}'.format(gateway) wc.request('POST', '/data', setdata) rsp = wc.getresponse() - rspdata = rsp.read() + rspdata = util.stringify(rsp.read()) if '0' not in rspdata: raise Exception("Error configuring SMM Network") return @@ -145,7 +145,7 @@ class NodeHandler(bmchandler.NodeHandler): authdata['password'] = password wc.request('POST', '/data/login', urlencode(authdata), headers) rsp = wc.getresponse() - rspdata = rsp.read() + rspdata = util.stringify(rsp.read()) if 'authResult>0' in rspdata: tokens = fromstring(rspdata) st2 = tokens.findall('st2')[0].text @@ -181,6 +181,10 @@ class NodeHandler(bmchandler.NodeHandler): 'secret.hardwaremanagementuser', {}).get('value', 'USERID') passwd = creds.get(nodename, {}).get( 'secret.hardwaremanagementpassword', {}).get('value', 'PASSW0RD') + if not isinstance(username, str): + username = username.decode('utf8') + if not isinstance(passwd, str): + passwd = passwd.decode('utf8') if passwd == 'PASSW0RD' and self.ruleset: raise Exception('Cannot support default password and setting password rules at same time') if passwd == 'PASSW0RD': diff --git a/confluent_server/confluent/discovery/handlers/tsm.py b/confluent_server/confluent/discovery/handlers/tsm.py index ace75bf5..0fba1b05 100644 --- a/confluent_server/confluent/discovery/handlers/tsm.py +++ b/confluent_server/confluent/discovery/handlers/tsm.py @@ -22,6 +22,7 @@ try: from urllib import urlencode except ImportError: from urllib.parse import urlencode + getaddrinfo = eventlet.support.greendns.getaddrinfo webclient = eventlet.import_patched('pyghmi.util.webclient') @@ -43,6 +44,13 @@ class NodeHandler(generic.NodeHandler): self.atdefault = True super(NodeHandler, self).__init__(info, 
configmanager) + def scan(self): + c = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert) + i = c.grab_json_response('/redfish/v1/') + uuid = i.get('UUID', None) + if uuid: + self.info['uuid'] = uuid + def validate_cert(self, certificate): # broadly speaking, merely checks consistency moment to moment, # but if https_cert gets stricter, this check means something @@ -54,9 +62,17 @@ class NodeHandler(generic.NodeHandler): 'username': self.DEFAULT_USER, 'password': self.DEFAULT_PASS, } + wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert) + wc.set_header('Content-Type', 'application/json') + authmode = 0 if not self.trieddefault: - wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert) - rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata)) + rsp, status = wc.grab_json_response_with_status('/api/session', authdata) + if status == 403: + wc.set_header('Content-Type', 'application/x-www-form-urlencoded') + authmode = 1 + rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata)) + else: + authmode = 2 if status > 400: rsp = util.stringify(rsp) self.trieddefault = True @@ -68,9 +84,15 @@ class NodeHandler(generic.NodeHandler): 'default_password': self.DEFAULT_PASS, 'username': self.DEFAULT_USER } - rsp, status = wc.grab_json_response_with_status('/api/reset-pass', urlencode(passchange)) + if authmode == 2: + rsp, status = wc.grab_json_response_with_status('/api/reset-pass', passchange) + else: + rsp, status = wc.grab_json_response_with_status('/api/reset-pass', urlencode(passchange)) authdata['password'] = self.targpass - rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata)) + if authmode == 2: + rsp, status = wc.grab_json_response_with_status('/api/session', authdata) + else: + rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata)) self.csrftok = 
rsp['CSRFToken'] self.channel = rsp['channel'] self.curruser = self.DEFAULT_USER @@ -85,15 +107,23 @@ class NodeHandler(generic.NodeHandler): if self.curruser: authdata['username'] = self.curruser authdata['password'] = self.currpass - rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata)) - if rsp.status != 200: + if authmode != 1: + rsp, status = wc.grab_json_response_with_status('/api/session', authdata) + if authmode == 1 or status == 403: + wc.set_header('Content-Type', 'application/x-www-form-urlencoded') + rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata)) + if status != 200: return None self.csrftok = rsp['CSRFToken'] self.channel = rsp['channel'] return wc authdata['username'] = self.targuser authdata['password'] = self.targpass - rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata)) + if authmode != 1: + rsp, status = wc.grab_json_response_with_status('/api/session', authdata) + if authmode == 1 or status == 403: + wc.set_header('Content-Type', 'application/x-www-form-urlencoded') + rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata)) if status != 200: return None self.curruser = self.targuser diff --git a/confluent_server/confluent/discovery/handlers/xcc.py b/confluent_server/confluent/discovery/handlers/xcc.py index fcc3255f..8f357acf 100644 --- a/confluent_server/confluent/discovery/handlers/xcc.py +++ b/confluent_server/confluent/discovery/handlers/xcc.py @@ -15,6 +15,7 @@ import base64 import codecs import confluent.discovery.handlers.imm as immhandler +import confluent.exceptions as exc import confluent.netutil as netutil import confluent.util as util import errno @@ -95,7 +96,8 @@ class NodeHandler(immhandler.NodeHandler): ipmicmd.xraw_command(netfn=0x3a, command=0xf1, data=(1,)) except pygexc.IpmiException as e: if (e.ipmicode != 193 and 'Unauthorized name' not in str(e) and - 'Incorrect password' not in str(e)): + 
'Incorrect password' not in str(e) and + str(e) != 'Session no longer connected'): # raise an issue if anything other than to be expected if disableipmi: _, _ = wc.grab_json_response_with_status( @@ -170,7 +172,12 @@ class NodeHandler(immhandler.NodeHandler): pwdchanged = True if '_csrf_token' in wc.cookies: wc.set_header('X-XSRF-TOKEN', wc.cookies['_csrf_token']) + if pwdchanged: + # Remove the minimum change interval, to allow sane + # password changes after provisional changes + self.set_password_policy('') return (wc, pwdchanged) + return (None, None) @property def wc(self): @@ -204,7 +211,7 @@ class NodeHandler(immhandler.NodeHandler): # however the target *will* demand a new password... if it's currently # PASSW0RD # use TempW0rd42 to avoid divulging a real password on the line - # This is replacing one well known password (PASSW0RD) with another + # This is replacing one well known password (PASSW0RD) with another # (TempW0rd42) passwd = 'TempW0rd42' wc, pwdchanged = self.get_webclient('USERID', 'PASSW0RD', passwd) @@ -226,9 +233,9 @@ class NodeHandler(immhandler.NodeHandler): if wc: return wc - def set_password_policy(self): + def set_password_policy(self, strruleset): ruleset = {'USER_GlobalMinPassChgInt': '0'} - for rule in self.ruleset.split(','): + for rule in strruleset.split(','): if '=' not in rule: continue name, value = rule.split('=') @@ -350,15 +357,18 @@ class NodeHandler(immhandler.NodeHandler): # between hypothetical secure path and today. 
dpp = self.configmanager.get_node_attributes( nodename, 'discovery.passwordrules') - self.ruleset = dpp.get(nodename, {}).get( + strruleset = dpp.get(nodename, {}).get( 'discovery.passwordrules', {}).get('value', '') wc = self.wc creds = self.configmanager.get_node_attributes( self.nodename, ['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword'], decrypt=True) user, passwd, isdefault = self.get_node_credentials(nodename, creds, 'USERID', 'PASSW0RD') - self.set_password_policy() + self.set_password_policy(strruleset) if self._atdefaultcreds: + if isdefault and self.tmppasswd: + raise Exception( + 'Request to use default credentials, but refused by target after it has been changed to {0}'.format(self.tmppasswd)) if not isdefault: self._setup_xcc_account(user, passwd, wc) self._convert_sha256account(user, passwd, wc) diff --git a/confluent_server/confluent/discovery/protocols/pxe.py b/confluent_server/confluent/discovery/protocols/pxe.py index ce093f0a..6a6e1218 100644 --- a/confluent_server/confluent/discovery/protocols/pxe.py +++ b/confluent_server/confluent/discovery/protocols/pxe.py @@ -30,9 +30,16 @@ pxearchs = { '\x00\x07': 'uefi-x64', '\x00\x09': 'uefi-x64', '\x00\x0b': 'uefi-aarch64', + '\x00\x10': 'uefi-httpboot', } +def stringify(value): + string = bytes(value) + if not isinstance(string, str): + string = string.decode('utf8') + return string + def decode_uuid(rawguid): lebytes = struct.unpack_from('HHI', rawguid[8:]) @@ -40,23 +47,50 @@ def decode_uuid(rawguid): lebytes[0], lebytes[1], lebytes[2], bebytes[0], bebytes[1], bebytes[2]).lower() +def _decode_ocp_vivso(rq, idx, size): + end = idx + size + vivso = {'service-type': 'onie-switch'} + while idx < end: + if rq[idx] == 3: + vivso['machine'] = stringify(rq[idx + 2:idx + 2 + rq[idx + 1]]) + elif rq[idx] == 4: + vivso['arch'] = stringify(rq[idx + 2:idx + 2 + rq[idx + 1]]) + elif rq[idx] == 5: + vivso['revision'] = stringify(rq[idx + 2:idx + 2 + rq[idx + 1]]) + idx += rq[idx + 1] + 2 + 
return '', None, vivso + + def find_info_in_options(rq, optidx): uuid = None arch = None + vivso = None + ztpurlrequested = False + iscumulus = False try: while uuid is None or arch is None: if rq[optidx] == 53: # DHCP message type # we want only length 1 and only discover (type 1) if rq[optidx + 1] != 1 or rq[optidx + 2] != 1: - return uuid, arch + return uuid, arch, vivso optidx += 3 + elif rq[optidx] == 55: + if 239 in rq[optidx + 2:optidx + 2 + rq[optidx + 1]]: + ztpurlrequested = True + optidx += rq[optidx + 1] + 2 + elif rq[optidx] == 60: + vci = stringify(rq[optidx + 2:optidx + 2 + rq[optidx + 1]]) + if vci.startswith('cumulus-linux'): + iscumulus = True + arch = vci.replace('cumulus-linux', '').strip() + optidx += rq[optidx + 1] + 2 elif rq[optidx] == 97: if rq[optidx + 1] != 17: # 16 bytes of uuid and one reserved byte - return uuid, arch + return uuid, arch, vivso if rq[optidx + 2] != 0: # the reserved byte should be zero, # anything else would be a new spec that we don't know yet - return uuid, arch + return uuid, arch, vivso uuid = decode_uuid(rq[optidx + 3:optidx + 19]) optidx += 19 elif rq[optidx] == 93: @@ -66,11 +100,20 @@ def find_info_in_options(rq, optidx): if archraw in pxearchs: arch = pxearchs[archraw] optidx += 4 + elif rq[optidx] == 125: + #vivso = rq[optidx + 2:optidx + 2 + rq[optidx + 1]] + if rq[optidx + 2:optidx + 6] == b'\x00\x00\xa6\x7f': # OCP + return _decode_ocp_vivso(rq, optidx + 7, rq[optidx + 6]) + optidx += rq[optidx + 1] + 2 else: optidx += rq[optidx + 1] + 2 except IndexError: - return uuid, arch - return uuid, arch + pass + if not vivso and iscumulus and ztpurlrequested: + if not uuid: + uuid = '' + vivso = {'service-type': 'cumulus-switch', 'arch': arch} + return uuid, arch, vivso def snoop(handler, protocol=None): #TODO(jjohnson2): ipv6 socket and multicast for DHCPv6, should that be @@ -101,7 +144,14 @@ def snoop(handler, protocol=None): optidx = rq.index(b'\x63\x82\x53\x63') + 4 except ValueError: continue - uuid, arch = 
find_info_in_options(rq, optidx) + uuid, arch, vivso = find_info_in_options(rq, optidx) + if vivso: + # info['modelnumber'] = info['attributes']['enclosure-machinetype-model'][0] + handler({'hwaddr': netaddr, 'uuid': uuid, + 'architecture': vivso.get('arch', ''), + 'services': (vivso['service-type'],), + 'attributes': {'enclosure-machinetype-model': [vivso.get('machine', '')]}}) + continue if uuid is None: continue # We will fill out service to have something to byte into, diff --git a/confluent_server/confluent/discovery/protocols/slp.py b/confluent_server/confluent/discovery/protocols/slp.py index 09d04691..2ff45a23 100644 --- a/confluent_server/confluent/discovery/protocols/slp.py +++ b/confluent_server/confluent/discovery/protocols/slp.py @@ -84,6 +84,8 @@ def _parse_SrvRply(parsed): :return: """ payload = parsed['payload'] + if len(payload) < 4: + return ecode, ucount = struct.unpack('!HH', bytes(payload[0:4])) if ecode: parsed['errorcode'] = ecode @@ -234,13 +236,20 @@ def _find_srvtype(net, net4, srvtype, addresses, xid): def _grab_rsps(socks, rsps, interval, xidmap): - r, _, _ = select.select(socks, (), (), interval) + r = None + res = select.select(socks, (), (), interval) + if res: + r = res[0] while r: for s in r: (rsp, peer) = s.recvfrom(9000) neighutil.refresh_neigh() _parse_slp_packet(rsp, peer, rsps, xidmap) - r, _, _ = select.select(socks, (), (), interval) + res = select.select(socks, (), (), interval) + if not res: + r = None + else: + r = res[0] @@ -560,7 +569,7 @@ def scan(srvtypes=_slp_services, addresses=None, localonly=False): # now to analyze and flesh out the responses for id in rsps: if 'service:ipmi' in rsps[id]['services']: - if 'service:ipmi://Athena:623' in rsps[id]['urls']: + if 'service:ipmi://Athena:623' in rsps[id].get('urls', ''): rsps[id]['services'] = ['service:thinkagile-storage'] else: continue diff --git a/confluent_server/confluent/exceptions.py b/confluent_server/confluent/exceptions.py index 80f5337e..cb856094 100644 --- 
a/confluent_server/confluent/exceptions.py +++ b/confluent_server/confluent/exceptions.py @@ -17,7 +17,17 @@ import base64 import json +import msgpack +def deserialize_exc(msg): + excd = msgpack.unpackb(msg, raw=False) + if excd[0] == 'Exception': + return Exception(excd[1]) + if excd[0] not in globals(): + return Exception('Cannot deserialize: {0}'.format(repr(excd))) + if not issubclass(globals()[excd[0]], ConfluentException): + return Exception('Cannot deserialize: {0}'.format(repr(excd))) + return globals()[excd[0]](*excd[1]) class ConfluentException(Exception): apierrorcode = 500 @@ -27,6 +37,10 @@ class ConfluentException(Exception): errstr = ' - '.join((self._apierrorstr, str(self))) return json.dumps({'error': errstr }) + def serialize(self): + return msgpack.packb([self.__class__.__name__, [str(self)]], + use_bin_type=False) + @property def apierrorstr(self): if str(self): @@ -104,16 +118,24 @@ class PubkeyInvalid(ConfluentException): def __init__(self, text, certificate, fingerprint, attribname, event): super(PubkeyInvalid, self).__init__(self, text) + self.myargs = (text, certificate, fingerprint, attribname, event) self.fingerprint = fingerprint self.attrname = attribname self.message = text + certtxt = base64.b64encode(certificate) + if not isinstance(certtxt, str): + certtxt = certtxt.decode('utf8') bodydata = {'message': text, 'event': event, 'fingerprint': fingerprint, 'fingerprintfield': attribname, - 'certificate': base64.b64encode(certificate)} + 'certificate': certtxt} self.errorbody = json.dumps(bodydata) + def serialize(self): + return msgpack.packb([self.__class__.__name__, self.myargs], + use_bin_type=False) + def get_error_body(self): return self.errorbody diff --git a/confluent_server/confluent/firmwaremanager.py b/confluent_server/confluent/firmwaremanager.py index 6f819061..f3151615 100644 --- a/confluent_server/confluent/firmwaremanager.py +++ b/confluent_server/confluent/firmwaremanager.py @@ -36,20 +36,40 @@ _tracelog = None def
execupdate(handler, filename, updateobj, type, owner, node): global _tracelog - if type != 'ffdc' and not os.path.exists(filename): - errstr = '{0} does not appear to exist on {1}'.format( - filename, socket.gethostname()) - updateobj.handle_progress({'phase': 'error', 'progress': 0.0, - 'detail': errstr}) - return + if type != 'ffdc': + errstr = False + if not os.path.exists(filename): + errstr = '{0} does not appear to exist on {1}'.format( + filename, socket.gethostname()) + elif not os.access(filename, os.R_OK): + errstr = '{0} is not readable by confluent on {1} (ensure confluent user or group can access file and parent directories)'.format( + filename, socket.gethostname()) + if errstr: + updateobj.handle_progress({'phase': 'error', 'progress': 0.0, + 'detail': errstr}) + return if type == 'ffdc' and os.path.isdir(filename): - filename += '/' + node + '.svcdata' + filename += '/' + node + if type == 'ffdc': + errstr = False + if os.path.exists(filename): + errstr = '{0} already exists on {1}, cannot overwrite'.format( + filename, socket.gethostname()) + elif not os.access(os.path.dirname(filename), os.W_OK): + errstr = '{0} directory not writable by confluent user/group on {1}, check the directory and parent directory ownership and permissions'.format(filename, socket.gethostname()) + if errstr: + updateobj.handle_progress({'phase': 'error', 'progress': 0.0, + 'detail': errstr}) + return try: if type == 'firmware': completion = handler(filename, progress=updateobj.handle_progress, bank=updateobj.bank) else: completion = handler(filename, progress=updateobj.handle_progress) + if type == 'ffdc' and completion: + filename = completion + completion = None if completion is None: completion = 'complete' if owner: diff --git a/confluent_server/confluent/main.py b/confluent_server/confluent/main.py index 50978a95..c009198e 100644 --- a/confluent_server/confluent/main.py +++ b/confluent_server/confluent/main.py @@ -77,13 +77,16 @@ def _daemonize(): print('confluent
server starting as pid {0}'.format(thispid)) os._exit(0) os.closerange(0, 2) - os.umask(63) os.open(os.devnull, os.O_RDWR) os.dup2(0, 1) os.dup2(0, 2) + log.daemonized = True + + +def _redirectoutput(): + os.umask(63) sys.stdout = log.Logger('stdout', buffered=False) sys.stderr = log.Logger('stderr', buffered=False) - log.daemonized = True def _updatepidfile(): @@ -206,7 +209,7 @@ def setlimits(): pass -def run(): +def run(args): setlimits() try: signal.signal(signal.SIGUSR1, dumptrace) @@ -232,7 +235,10 @@ def run(): except (OSError, IOError) as e: print(repr(e)) sys.exit(1) - _daemonize() + if '-f' not in args: + _daemonize() + if '-o' not in args: + _redirectoutput() if havefcntl: _updatepidfile() signal.signal(signal.SIGINT, terminate) diff --git a/confluent_server/confluent/messages.py b/confluent_server/confluent/messages.py index fd2151c5..b14d12ee 100644 --- a/confluent_server/confluent/messages.py +++ b/confluent_server/confluent/messages.py @@ -24,6 +24,7 @@ import confluent.config.conf as cfgfile from copy import deepcopy from datetime import datetime import confluent.util as util +import msgpack import json try: @@ -76,7 +77,7 @@ def _htmlify_structure(indict): if datum is None: nd.append('') else: - nd.append(datum) + nd.append(util.stringify(datum)) ret += ",".join(nd) else: for v in indict: @@ -84,6 +85,13 @@ def _htmlify_structure(indict): return ret + '' +def msg_deserialize(packed): + m = msgpack.unpackb(packed, raw=False) + cls = globals()[m[0]] + if issubclass(cls, ConfluentMessage) or issubclass(cls, ConfluentNodeError): + return cls(*m[1:]) + raise Exception("Unknown shenanigans") + class ConfluentMessage(object): apicode = 200 readonly = False @@ -105,6 +113,15 @@ class ConfluentMessage(object): jsonsnippet = json.dumps(datasource, sort_keys=True, separators=(',', ':'))[1:-1] return jsonsnippet + def serialize(self): + msg = [self.__class__.__name__] + msg.extend(self.myargs) + return msgpack.packb(msg, use_bin_type=False) + + @classmethod + 
def deserialize(cls, data): + return cls(*data) + def raw(self): """Return pythonic representation of the response. @@ -138,6 +155,7 @@ class ConfluentMessage(object): snippet = "" for key in pairs: val = pairs[key] + key = util.stringify(key) value = self.defaultvalue if isinstance(val, dict) and 'type' in val: valtype = val['type'] @@ -210,6 +228,15 @@ class ConfluentNodeError(object): self.node = node self.error = errorstr + def serialize(self): + return msgpack.packb( + [self.__class__.__name__, self.node, self.error], + use_bin_type=False) + + @classmethod + def deserialize(cls, data): + return cls(*data) + def raw(self): return {'databynode': {self.node: {'errorcode': self.apicode, 'error': self.error}}} @@ -220,7 +247,7 @@ class ConfluentNodeError(object): def strip_node(self, node): # NOTE(jjohnson2): For single node errors, raise exception to # trigger what a developer of that medium would expect - raise Exception(self.error) + raise Exception('{0}: {1}'.format(self.node, self.error)) class ConfluentResourceUnavailable(ConfluentNodeError): @@ -259,9 +286,9 @@ class ConfluentTargetNotFound(ConfluentNodeError): class ConfluentTargetInvalidCredentials(ConfluentNodeError): apicode = 502 - def __init__(self, node): + def __init__(self, node, errstr='bad credentials'): self.node = node - self.error = 'bad credentials' + self.error = errstr def strip_node(self, node): raise exc.TargetEndpointBadCredentials @@ -270,6 +297,7 @@ class ConfluentTargetInvalidCredentials(ConfluentNodeError): class DeletedResource(ConfluentMessage): notnode = True def __init__(self, resource): + self.myargs = [resource] self.kvpairs = {'deleted': resource} def strip_node(self, node): @@ -281,6 +309,7 @@ class CreatedResource(ConfluentMessage): readonly = True def __init__(self, resource): + self.myargs = [resource] self.kvpairs = {'created': resource} def strip_node(self, node): @@ -292,6 +321,7 @@ class RenamedResource(ConfluentMessage): readonly = True def __init__(self, oldname, 
newname): + self.myargs = (oldname, newname) self.kvpairs = {'oldname': oldname, 'newname': newname} def strip_node(self, node): @@ -300,6 +330,7 @@ class RenamedResource(ConfluentMessage): class RenamedNode(ConfluentMessage): def __init__(self, name, rename): + self.myargs = (name, rename) self.desc = 'New Name' kv = {'rename': {'value': rename}} self.kvpairs = {name: kv} @@ -310,13 +341,16 @@ class AssignedResource(ConfluentMessage): readonly = True def __init__(self, resource): + self.myargs = [resource] self.kvpairs = {'assigned': resource} + class ConfluentChoiceMessage(ConfluentMessage): valid_values = set() valid_paramset = {} def __init__(self, node, state): + self.myargs = (node, state) self.stripped = False self.kvpairs = { node: { @@ -338,6 +372,7 @@ class ConfluentChoiceMessage(ConfluentMessage): snippet = '' for key in pairdata: val = pairdata[key] + key = util.stringify(key) snippet += key + ':