2
0
mirror of https://github.com/xcat2/confluent.git synced 2026-04-13 04:11:31 +00:00

Merge branch 'master' into nodesearch

This commit is contained in:
Jarrod Johnson
2020-02-20 20:37:23 -05:00
55 changed files with 1285 additions and 283 deletions

View File

@@ -76,6 +76,11 @@ import confluent.termhandler as termhandler
import confluent.tlvdata as tlvdata
import confluent.client as client
try:
unicode
except NameError:
unicode = str
conserversequence = '\x05c' # ctrl-e, c
clearpowermessage = False
@@ -299,8 +304,11 @@ currchildren = None
def print_result(res):
global exitcode
if 'errorcode' in res or 'error' in res:
print(res['error'])
if 'errorcode' in res:
exitcode |= res['errorcode']
return
if 'databynode' in res:
print_result(res['databynode'])

View File

@@ -50,6 +50,9 @@ argparser.add_option('-c', '--clear', action='store_true',
help='Clear attributes')
argparser.add_option('-p', '--prompt', action='store_true',
help='Prompt for attribute values interactively')
argparser.add_option('-m', '--maxnodes', type='int',
help='Prompt if trying to set attributes on more '
'than specified number of nodes')
(options, args) = argparser.parse_args()
@@ -87,6 +90,7 @@ if len(args) > 1:
if oneval != twoval:
print('Values did not match.')
argassign[arg] = twoval
session.stop_if_noderange_over(noderange, options.maxnodes)
exitcode=client.updateattrib(session,args,nodetype, noderange, options, argassign)
try:
# setting user output to what the user inputs

View File

@@ -32,6 +32,8 @@ if path.startswith('/opt'):
import confluent.client as client
argparser = optparse.OptionParser(usage="Usage: %prog <noderange>")
argparser.add_option('-m', '--maxnodes', type='int',
help='Number of nodes to affect before prompting for confirmation')
(options, args) = argparser.parse_args()
try:
noderange = args[0]
@@ -43,8 +45,10 @@ session = client.Command()
exitcode = 0
errorNodes = set([])
session.stop_if_noderange_over(noderange, options.maxnodes)
success = session.simple_noderange_command(noderange, 'configuration/management_controller/reset', 'reset', key='state', errnodes=errorNodes) # = 0 if successful
if success != 0:
sys.exit(success)
# Determine which nodes were successful and print them

View File

@@ -42,6 +42,10 @@ argparser.add_option('-p', '--persist', dest='persist', action='store_true',
default=False,
help='Request the boot device be persistent rather than '
'one time')
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to boot, '
'prompting if over the threshold')
(options, args) = argparser.parse_args()
@@ -54,8 +58,8 @@ except IndexError:
sys.exit(1)
client.check_globbing(noderange)
bootdev = None
if len(sys.argv) > 2:
bootdev = sys.argv[2]
if len(args) > 1:
bootdev = args[1]
if bootdev in ('net', 'pxe'):
bootdev = 'network'
session = client.Command()
@@ -66,6 +70,7 @@ else:
bootmode = 'uefi'
errnodes = set([])
session.stop_if_noderange_over(noderange, options.maxnodes)
rc = session.simple_noderange_command(noderange, '/boot/nextdevice', bootdev,
bootmode=bootmode,
persistent=options.persist,

View File

@@ -54,6 +54,12 @@ argparser.add_option('-d', '--detail', dest='detail',
action='store_true', default=False,
help='Provide verbose information as available, such as '
'help text and possible valid values')
argparser.add_option('-e', '--extra', dest='extra',
action='store_true', default=False,
help='Access extra configuration. Extra configuration is generally '
'reserved for unpopular or redundant options that may be slow to '
'read. Notably the IMM category on Lenovo settings is considered '
'to be extra configuration')
argparser.add_option('-x', '--exclude', dest='exclude',
action='store_true', default=False,
help='Treat positional arguments as items to not '
@@ -68,6 +74,10 @@ argparser.add_option('-r', '--restoredefault', default=False,
help='Restore the configuration of the node '
'to factory default for given component. '
'Currently only uefi is supported')
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to configure, '
'prompting if over the threshold')
(options, args) = argparser.parse_args()
cfgpaths = {
@@ -99,6 +109,7 @@ assignment = {}
queryparms = {}
printsys = []
printbmc = []
printextbmc = []
printallbmc = False
setsys = {}
forceset = False
@@ -173,11 +184,17 @@ def parse_config_line(arguments):
del queryparms[path]
except KeyError:
pass
if not matchedparms:
if param.lower() == 'imm':
printextbmc.append(param)
options.extra = True
elif not matchedparms:
printsys.append(param)
elif param not in cfgpaths:
if param.startswith('bmc.'):
printbmc.append(param.replace('bmc.', ''))
elif param.lower().startswith('imm'):
options.extra = True
printextbmc.append(param)
else:
printsys.append(param)
else:
@@ -205,6 +222,7 @@ else:
session = client.Command()
rcode = 0
if options.restoredefault:
session.stop_if_noderange_over(noderange, options.maxnodes)
if options.restoredefault.lower() in (
'sys', 'system', 'uefi', 'bios'):
for fr in session.update(
@@ -225,6 +243,7 @@ if options.restoredefault:
options.restoredefault))
sys.exit(1)
if setmode:
session.stop_if_noderange_over(noderange, options.maxnodes)
if options.exclude:
sys.stderr.write('Cannot use exclude and assign at the same time\n')
sys.exit(1)
@@ -269,10 +288,15 @@ else:
NullOpt(), queryparms[path])
if rc:
sys.exit(rc)
if printbmc or printallbmc:
rcode = client.print_attrib_path(
'/noderange/{0}/configuration/management_controller/extended/all'.format(noderange),
session, printbmc, options, attrprefix='bmc.')
if printsys == 'all' or printextbmc or printbmc or printallbmc:
if printbmc or not printextbmc:
rcode = client.print_attrib_path(
'/noderange/{0}/configuration/management_controller/extended/all'.format(noderange),
session, printbmc, options, attrprefix='bmc.')
if options.extra:
rcode |= client.print_attrib_path(
'/noderange/{0}/configuration/management_controller/extended/extra'.format(noderange),
session, printextbmc, options)
if printsys or options.exclude:
if printsys == 'all':
printsys = []
@@ -281,7 +305,6 @@ else:
else:
path = '/noderange/{0}/configuration/system/advanced'.format(
noderange)
rcode = client.print_attrib_path(path, session, printsys,
options)
sys.exit(rcode)

View File

@@ -38,6 +38,10 @@ if sys.version_info[0] < 3:
argparser = optparse.OptionParser(
usage="Usage: %prog [options] noderange [clear]")
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to clear if clearing log, '
'prompting if over the threshold')
(options, args) = argparser.parse_args()
try:
noderange = args[0]
@@ -46,11 +50,11 @@ except IndexError:
sys.exit(1)
client.check_globbing(noderange)
deletemode = False
if len(sys.argv) > 3:
if len(args) > 2:
argparser.print_help()
sys.exit(1)
if len(sys.argv) == 3:
if sys.argv[2] == 'clear':
if len(args) == 2:
if args[1] == 'clear':
deletemode = True
else:
argparser.print_help()
@@ -88,6 +92,7 @@ def format_event(evt):
if deletemode:
func = session.delete
session.stop_if_noderange_over(noderange, options.maxnodes)
else:
func = session.read
for rsp in func('/noderange/{0}/events/hardware/log'.format(noderange)):

View File

@@ -59,6 +59,10 @@ argparser = optparse.OptionParser(
"%prog <noderange> [list][update [--backup <file>]]|[<components>]")
argparser.add_option('-b', '--backup', action='store_true',
help='Target a backup bank rather than primary')
argparser.add_option('-m', '--maxnodes', type='int',
help='When updating, prompt if more than the specified '
'number of servers will be affected')
(options, args) = argparser.parse_args()
upfile = None
try:
@@ -95,6 +99,7 @@ def get_update_progress(session, url):
def update_firmware(session, filename):
global exitcode
session.stop_if_noderange_over(noderange, options.maxnodes)
output = sq.ScreenPrinter(noderange, session)
nodeurls = {}
filename = os.path.abspath(filename)

View File

@@ -37,6 +37,10 @@ exitcode = 0
argparser = optparse.OptionParser(
usage="Usage: "
"%prog <noderange> [list][install <file>|save <directory>|delete <name>]")
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to delete licenses from, '
'prompting if over the threshold')
(options, args) = argparser.parse_args()
upfile = None
downdir = None
@@ -52,7 +56,7 @@ try:
delete = args[2]
elif args[1] != 'list':
argparser.print_help()
sys.exit(1)
sys.exit(1)
except IndexError:
argparser.print_help()
sys.exit(1)
@@ -138,6 +142,7 @@ try:
elif downdir:
save_licenses(session, downdir)
elif delete:
session.stop_if_noderange_over(noderange, options.maxnodes)
delete_license(session, delete)
else:
show_licenses(session)

View File

@@ -37,6 +37,11 @@ argparser = optparse.OptionParser(
argparser.add_option('-p', '--showprevious', dest='previous',
action='store_true', default=False,
help='Show previous power state')
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to change power state, '
'prompting if over the threshold')
(options, args) = argparser.parse_args()
try:
noderange = args[0]
@@ -72,4 +77,4 @@ if options.previous:
# add dictionary to session
session.add_precede_dict(prev)
sys.exit(session.simple_noderange_command(noderange, '/power/state', setstate))
sys.exit(session.simple_noderange_command(noderange, '/power/state', setstate, promptover=options.maxnodes))

View File

@@ -35,6 +35,10 @@ import confluent.client as client
argparser = optparse.OptionParser(
usage='''\n %prog noderange
\n ''')
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to delete, '
'prompting if over the threshold')
(options, args) = argparser.parse_args()
if len(args) != 1:
argparser.print_help()
@@ -43,6 +47,7 @@ noderange = args[0]
client.check_globbing(noderange)
session = client.Command()
exitcode = 0
session.stop_if_noderange_over(noderange, options.maxnodes)
for r in session.delete('/noderange/{0}'.format(noderange)):
if 'error' in r:
sys.stderr.write(r['error'] + '\n')

View File

@@ -32,6 +32,10 @@ if path.startswith('/opt'):
import confluent.client as client
argparser = optparse.OptionParser(usage="Usage: %prog <noderange>")
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to reseat, '
'prompting if over the threshold')
(options, args) = argparser.parse_args()
try:
noderange = args[0]
@@ -43,7 +47,7 @@ session = client.Command()
exitcode = 0
errorNodes = set([])
session.stop_if_noderange_over(noderange, options.maxnodes)
success = session.simple_noderange_command(noderange, 'power/reseat', 'reseat', key='reseat', errnodes=errorNodes) # = 0 if successful
# Determine which nodes were successful and print them

View File

@@ -42,6 +42,10 @@ def run():
argparser = optparse.OptionParser(
usage="Usage: %prog location noderange:location",
)
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to run rsync to, '
'prompting if over the threshold')
argparser.add_option('-f', '-c', '--count', type='int', default=168,
help='Number of nodes to concurrently rsync')
# among other things, FD_SETSIZE limits. Besides, spawning too many
@@ -64,7 +68,7 @@ def run():
pipedesc = {}
pendingexecs = deque()
exitcode = 0
c.stop_if_noderange_over(noderange, options.maxnodes)
for exp in c.create('/noderange/{0}/attributes/expression'.format(noderange),
{'expression': cmdstr}):
if 'error' in exp:
@@ -100,7 +104,7 @@ def run():
if desc['type'] == 'stdout':
if node not in pernodeout:
pernodeout[node] = ''
pernodeout[node] += stringify(data)
pernodeout[node] += client.stringify(data)
if '\n' in pernodeout[node]:
currout, pernodeout[node] = pernodeout[node].split('\n', 1)
if currout:

View File

@@ -46,6 +46,10 @@ def run():
help='Number of commands to run at a time')
argparser.add_option('-n', '--nonodeprefix', action='store_true',
help='Do not prefix output with node names')
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to run the command with, '
'prompting if over the threshold')
# among other things, FD_SETSIZE limits. Besides, spawning too many
# processes can be unkind for the unaware on memory pressure and such...
argparser.disable_interspersed_args()
@@ -63,7 +67,7 @@ def run():
pipedesc = {}
pendingexecs = deque()
exitcode = 0
c.stop_if_noderange_over(args[0], options.maxnodes)
for exp in c.create('/noderange/{0}/attributes/expression'.format(args[0]),
{'expression': cmdstr}):
if 'error' in exp:

View File

@@ -43,7 +43,10 @@ argparser.add_option('-p', '--persist', dest='persist', action='store_true',
argparser.add_option('-u', '--uefi', dest='uefi', action='store_true',
default=True,
help='Request UEFI style boot (rather than BIOS)')
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to modify next boot device, '
'prompting if over the threshold')
(options, args) = argparser.parse_args()
try:
@@ -63,6 +66,7 @@ if options.biosmode:
bootmode = 'bios'
else:
bootmode = 'uefi'
session.stop_if_noderange_over(noderange, options.maxnodes)
sys.exit(session.simple_noderange_command(noderange, '/boot/nextdevice', bootdev,
bootmode=bootmode,
persistent=options.persist))

View File

@@ -46,6 +46,10 @@ def run():
help='Number of commands to run at a time')
argparser.add_option('-n', '--nonodeprefix', action='store_true',
help='Do not prefix output with node names')
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to run remote ssh command to, '
'prompting if over the threshold')
# among other things, FD_SETSIZE limits. Besides, spawning too many
# processes can be unkind for the unaware on memory pressure and such...
argparser.disable_interspersed_args()
@@ -55,7 +59,7 @@ def run():
sys.exit(1)
client.check_globbing(args[0])
concurrentprocs = options.count
c = client.Command()
c = client.Command()
cmdstr = " ".join(args[1:])
currprocs = 0
@@ -64,7 +68,7 @@ def run():
pendingexecs = deque()
exitcode = 0
c.stop_if_noderange_over(args[0], options.maxnodes)
for exp in c.create('/noderange/{0}/attributes/expression'.format(args[0]),
{'expression': cmdstr}):
if 'error' in exp:

View File

@@ -63,10 +63,16 @@ def _print_cfg(scfg):
sys.stderr.write(e['error'] + '\n')
exitcode = e.get('errorcode', 1)
for node in e.get('databynode', {}):
curr = e['databynode'][node]
if 'error' in curr:
if 'no available drives' in curr['error']:
curr['error'] += ' (drives must be in unconfigured state to be available, they must not be in jbod or online state)'
sys.stderr.write('{0}: {1}\n'.format(node, curr['error']))
exitcode = curr.get('errorcode', 1)
continue
if node not in storagebynode:
storagebynode[node] = {'disks': [], 'arrays': [],
'volumes': []}
curr = e['databynode'][node]
storagebynode[node][curr['type'] + 's'].append(curr)
for node in storagebynode:
for disk in sorted(storagebynode[node]['disks'],
@@ -108,6 +114,7 @@ def createstorage(noderange, options, args):
sys.stderr.write('-r and -d are required arguments to create array\n')
sys.exit(1)
session = client.Command()
session.stop_if_noderange_over(noderange, options.maxnodes)
names = options.name
if names is None:
names = ''.join(args)
@@ -132,6 +139,7 @@ def deletestorage(noderange, options, args):
else:
names = options.name
session = client.Command()
session.stop_if_noderange_over(noderange, options.maxnodes)
for rsp in session.delete(
'/noderange/{0}/configuration/storage/volumes/{1}'.format(
noderange, names)):
@@ -162,6 +170,7 @@ def setdisk(noderange, options, args):
sys.stderr.write('diskset requires valid state as argument (hotspare, jbod, unconfigured)\n')
sys.exit(1)
session = client.Command()
session.stop_if_noderange_over(noderange, options.maxnodes)
scfg = session.update('/noderange/{0}/configuration/storage/disks/{1}'.format(noderange, names), {'state': args[0]})
_print_cfg(scfg)
@@ -202,6 +211,10 @@ def main():
help='Comma separated list of stripsizes to use when creating volumes. '
'This value is in kilobytes. The default behavior is to allow the '
'storage controller to decide.')
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to configure storage on, '
'prompting if over the threshold')
(options, args) = argparser.parse_args()
if len(args) == 1:
args.append('show')

View File

@@ -64,7 +64,7 @@ def printerror(res, node=None):
def download_servicedata(noderange, media):
def download_servicedata(noderange, media, options):
global exitcode
session = client.Command()
output = sq.ScreenPrinter(noderange, session)
@@ -73,6 +73,7 @@ def download_servicedata(noderange, media):
upargs = {'filename': filename}
noderrs = {}
nodeurls = {}
session.stop_if_noderange_over(noderange, options.maxnodes)
for res in session.create(resource, upargs):
if 'created' not in res:
for nodename in res.get('databynode', ()):
@@ -121,6 +122,10 @@ def main():
'management server (the confluent server if running remote, '
'and the collective.manager if in collective)\n'
'\n\nSee `man %prog` for more info.\n')
argparser.add_option('-m', '--maxnodes', type='int',
help='Specify a maximum number of '
'nodes to download diagnostic data from, '
'prompting if over the threshold')
(options, args) = argparser.parse_args()
media = None
try:
@@ -142,6 +147,6 @@ def main():
except KeyError:
argparser.print_help()
sys.exit(1)
handler(noderange, media)
handler(noderange, media, options)
if __name__ == '__main__':
main()

View File

@@ -39,6 +39,10 @@ _attraliases = {
'bmcpass': 'secret.hardwaremanagementpassword',
}
try:
input = raw_input
except NameError:
pass
def stringify(instr):
# Normalize unicode and bytes to 'str', correcting for
@@ -219,7 +223,7 @@ class Command(object):
return rc
def simple_noderange_command(self, noderange, resource, input=None,
key=None, errnodes=None, **kwargs):
key=None, errnodes=None, promptover=None, **kwargs):
try:
self._currnoderange = noderange
rc = 0
@@ -235,6 +239,7 @@ class Command(object):
noderange, resource)):
rc = self.handle_results(ikey, rc, res, errnodes)
else:
self.stop_if_noderange_over(noderange, promptover)
kwargs[ikey] = input
for res in self.update('/noderange/{0}/{1}'.format(
noderange, resource), kwargs):
@@ -244,6 +249,33 @@ class Command(object):
except KeyboardInterrupt:
cprint('')
return 0
def stop_if_noderange_over(self, noderange, maxnodes):
if maxnodes is None:
return
nsize = self.get_noderange_size(noderange)
if nsize > maxnodes:
if nsize == 1:
nodename = list(self.read(
'/noderange/{0}/nodes/'.format(noderange)))[0].get('item', {}).get('href', None)
nodename = nodename[:-1]
p = input('Command is about to affect node {0}, continue (y/n)? '.format(nodename))
else:
p = input('Command is about to affect {0} nodes, continue (y/n)? '.format(nsize))
if p.lower() != 'y':
sys.stderr.write('Aborting at user request\n')
sys.exit(1)
raise Exception("Aborting at user request")
def get_noderange_size(self, noderange):
numnodes = 0
for node in self.read('/noderange/{0}/nodes/'.format(noderange)):
if node.get('item', {}).get('href', None):
numnodes += 1
else:
raise Exception("Error trying to size noderange {0}".format(noderange))
return numnodes
def simple_nodegroups_command(self, noderange, resource, input=None, key=None, **kwargs):
try:
@@ -344,12 +376,8 @@ class Command(object):
if fingerprint == khf[hostid]:
return
else:
try:
replace = raw_input(
"MISMATCHED CERTIFICATE DATA, ACCEPT NEW? (y/n):")
except NameError:
replace = input(
"MISMATCHED CERTIFICATE DATA, ACCEPT NEW? (y/n):")
replace = input(
"MISMATCHED CERTIFICATE DATA, ACCEPT NEW? (y/n):")
if replace not in ('y', 'Y'):
raise Exception("BAD CERTIFICATE")
cprint('Adding new key for %s:%s' % (server, port))
@@ -401,6 +429,10 @@ def printattributes(session, requestargs, showtype, nodetype, noderange, options
path = '/{0}/{1}/attributes/{2}'.format(nodetype, noderange, showtype)
return print_attrib_path(path, session, requestargs, options)
def _sort_attrib(k):
if isinstance(k[1], dict) and k[1].get('sortid', None) is not None:
return k[1]['sortid']
return k[0]
def print_attrib_path(path, session, requestargs, options, rename=None, attrprefix=None):
exitcode = 0
@@ -411,9 +443,7 @@ def print_attrib_path(path, session, requestargs, options, rename=None, attrpref
exitcode = 1
continue
for node in sorted(res['databynode']):
for attr, val in sorted(
res['databynode'][node].items(),
key=lambda k: k[1].get('sortid', k[0]) if isinstance(k[1], dict) else k[0]):
for attr, val in sorted(res['databynode'][node].items(), key=_sort_attrib):
if attr == 'error':
sys.stderr.write('{0}: Error: {1}\n'.format(node, val))
continue

View File

@@ -46,9 +46,8 @@ alias nodelicense='CURRENT_CMDLINE=$(HISTTIMEFORMAT= builtin history 1); export
_confluent_get_args()
{
CMPARGS=($COMP_LINE)
NUMARGS=${#CMPARGS[@]}
if [ "${COMP_WORDS[-1]}" == '' ]; then
NUMARGS=$((NUMARGS+1))
NUMARGS=$((COMP_CWORD+1))
if [ "${COMP_WORDS[COMP_CWORD]}" == '' ]; then
CMPARGS+=("")
fi
GENNED=""
@@ -75,7 +74,7 @@ function _confluent_generic_completion()
{
_confluent_get_args
if [ $NUMARGS -ge 3 ] && [ ! -z "$GENNED" ]; then
COMPREPLY=($(compgen -W "$GENNED" -- ${COMP_WORDS[-1]}))
COMPREPLY=($(compgen -W "$GENNED" -- ${COMP_WORDS[COMP_CWORD]}))
fi
if [ $NUMARGS -lt 3 ]; then
_confluent_nr_completion
@@ -111,7 +110,7 @@ _confluent_nodemedia_completion()
return
fi
if [ $NUMARGS -ge 3 ] && [ ! -z "$GENNED" ]; then
COMPREPLY=($(compgen -W "$GENNED" -- ${COMP_WORDS[-1]}))
COMPREPLY=($(compgen -W "$GENNED" -- ${COMP_WORDS[COMP_CWORD]}))
return;
fi
if [ $NUMARGS -lt 3 ]; then
@@ -124,7 +123,7 @@ _confluent_nodefirmware_completion()
{
_confluent_get_args
if [ $NUMARGS == 3 ]; then
COMPREPLY=($(compgen -W "list update" -- ${COMP_WORDS[-1]}))
COMPREPLY=($(compgen -W "list update" -- ${COMP_WORDS[COMP_CWORD]}))
return;
fi
if [ $NUMARGS -gt 3 ] && [ ${CMPARGS[2]} == 'update' ]; then
@@ -142,7 +141,7 @@ _confluent_nodeshell_completion()
{
_confluent_get_args
if [ $NUMARGS == 3 ]; then
COMPREPLY=($(compgen -c -- ${COMP_WORDS[-1]}))
COMPREPLY=($(compgen -c -- ${COMP_WORDS[COMP_CWORD]}))
return
fi
if [ $NUMARGS -gt 3 ]; then
@@ -160,7 +159,7 @@ _confluent_nodelicense_completion()
{
_confluent_get_args
if [ $NUMARGS == 3 ]; then
COMPREPLY=($(compgen -W "install list save delete" -- ${COMP_WORDS[-1]}))
COMPREPLY=($(compgen -W "install list save delete" -- ${COMP_WORDS[COMP_CWORD]}))
return;
fi
if [ $NUMARGS == 4 ] && [ ${CMPARGS[2]} == 'install' ]; then
@@ -183,7 +182,7 @@ _confluent_nodesupport_completion()
{
_confluent_get_args
if [ $NUMARGS == 3 ]; then
COMPREPLY=($(compgen -W "servicedata" -- ${COMP_WORDS[-1]}))
COMPREPLY=($(compgen -W "servicedata" -- ${COMP_WORDS[COMP_CWORD]}))
return;
fi
if [ $NUMARGS == 4 ] && [ ${CMPARGS[2]} == 'servicedata' ]; then
@@ -210,41 +209,36 @@ _confluent_nn_completion()
if [ $NUMARGS -gt 2 ]; then
return;
fi
INPUT=${COMP_WORDS[-1]}
INPUT=${COMP_WORDS[COMP_CWORD]}
INPUT=${INPUT##*,-}
INPUT=${INPUT##*,}
INPUT=${INPUT##*@}
PREFIX=""
if [ "$INPUT" != "${COMP_WORDS[-1]}" ]; then
PREFIX=${COMP_WORDS[-1]}
if [ "$INPUT" != "${COMP_WORDS[COMP_CWORD]}" ]; then
PREFIX=${COMP_WORDS[COMP_CWORD]}
PREFIX=$(echo $PREFIX | sed -e 's/,[^,@-]*$/,/' -e 's/,-[^,@]*$/,-/' -e 's/@[^,@]*/@/')
fi
COMPREPLY=($(compgen -W "$(nodelist | sed -e s/^/$PREFIX/)" -- "${COMP_WORDS[-1]}"))
COMPREPLY=($(compgen -W "$(nodelist | sed -e s/^/$PREFIX/)" -- "${COMP_WORDS[COMP_CWORD]}"))
}
_confluent_nr_completion()
{
CMPARGS=($COMP_LINE)
NUMARGS=${#CMPARGS[@]}
if [ "${COMP_WORDS[-1]}" == '' ]; then
NUMARGS=$((NUMARGS+1))
fi
_confluent_get_args
if [ $NUMARGS -gt 2 ]; then
return;
fi
INPUT=${COMP_WORDS[-1]}
INPUT=${COMP_WORDS[COMP_CWORD]}
INPUT=${INPUT##*,-}
INPUT=${INPUT##*,}
INPUT=${INPUT##*@}
PREFIX=""
if [ "$INPUT" != "${COMP_WORDS[-1]}" ]; then
PREFIX=${COMP_WORDS[-1]}
if [ "$INPUT" != "${COMP_WORDS[COMP_CWORD]}" ]; then
PREFIX=${COMP_WORDS[COMP_CWORD]}
PREFIX=$(echo $PREFIX | sed -e 's/,[^,@-]*$/,/' -e 's/,-[^,@]*$/,-/' -e 's/@[^,@]*/@/')
fi
#COMPREPLY=($(compgen -W "$(confetty show /nodegroups|sed -e 's/\///' -e s/^/$PREFIX/;nodelist | sed -e s/^/$PREFIX/)" -- "${COMP_WORDS[-1]}"))
COMPREPLY=($(compgen -W "$(confetty show /nodegroups|sed -e 's/\///' -e s/^/$PREFIX/;nodelist | sed -e s/^/$PREFIX/)" -- "${COMP_WORDS[-1]}"))
COMPREPLY=($(compgen -W "$(confetty show /nodegroups|sed -e 's/\///' -e s/^/$PREFIX/;nodelist | sed -e s/^/$PREFIX/)" -- "${COMP_WORDS[COMP_CWORD]}"))
}
_confluent_ng_completion()
{
@@ -252,17 +246,17 @@ _confluent_ng_completion()
if [ $NUMARGS -gt 2 ]; then
return;
fi
INPUT=${COMP_WORDS[-1]}
INPUT=${COMP_WORDS[COMP_CWORD]}
INPUT=${INPUT##*,-}
INPUT=${INPUT##*,}
INPUT=${INPUT##*@}
PREFIX=""
if [ "$INPUT" != "${COMP_WORDS[-1]}" ]; then
PREFIX=${COMP_WORDS[-1]}
if [ "$INPUT" != "${COMP_WORDS[COMP_CWORD]}" ]; then
PREFIX=${COMP_WORDS[COMP_CWORD]}
PREFIX=$(echo $PREFIX | sed -e 's/,[^,@-]*$/,/' -e 's/,-[^,@]*$/,-/' -e 's/@[^,@]*/@/')
fi
COMPREPLY=($(compgen -W "$(confetty show /nodegroups|sed -e 's/\///' -e s/^/$PREFIX/)" -- "${COMP_WORDS[-1]}"))
COMPREPLY=($(compgen -W "$(confetty show /nodegroups|sed -e 's/\///' -e s/^/$PREFIX/)" -- "${COMP_WORDS[COMP_CWORD]}"))
}
complete -F _confluent_nodeattrib_completion nodeattrib
complete -F _confluent_nodeattrib_completion nodegroupattrib

View File

@@ -22,6 +22,11 @@ given as a node expression, as documented in the man page for nodeattribexpressi
If combined with `-x`, will show all differing values except those indicated
by `-x`
* `-e`, `--extra`:
Read settings that are generally not needed, but may be slow to retrieve.
Notably this includes the IMM category of Lenovo systems. The most popular
IMM settings are available through faster 'bmc' attributes.
* `-x`, `--exclude`:
Rather than listing only the specified configuration parameters, list all
attributes except for the specified ones

View File

@@ -13,7 +13,11 @@ nodefirmware(8) -- Report firmware information on confluent nodes
will retrieve all firmware, but can be directed to fetch specific firmware by
calling out the name of the firmware (e.g. uefi or xcc) or request reading only
core firmware firmware by using the word 'core', which is generally a quicker
operation.
operation. Different hardwaremanagement.method indicated plugins may have
different capabilities available. For example, the 'core' distinction may
not be relevant to redfish. Additionally, the Lenovo XCC makes certain
information available over IPMI that is not otherwise available (for example
the FPGA version where applicable).
In the update form, it accepts a single file and attempts to update it using
the out of band facilities. Firmware updates can end in one of three states:

View File

@@ -7,7 +7,7 @@ nodesupport(8) -- Utilities for interacting with vendor support
## DESCRIPTION
`nodesupport` provides capabilities associated with interactiong with support.
`nodesupport` provides capabilities associated with interacting with support.
Currently it only has the `servicedata` subcommand. `servicedata` takes
an argument that is either a directory name (that can be used for a single node
or multiple nodes) or a file name (only to be used with single node noderange).
@@ -16,6 +16,9 @@ connects to the managed system, so it will download to the remote system if runn
remotely and will download to the collective.manager indicated system if
running in collective mode.
Note that due to vendor filename requirements, any filename may have vendor
specific suffixes added to any file produced.
## EXAMPLES
* Download support data from a single node to a specific filename

View File

@@ -88,6 +88,9 @@ def show_collective():
s = client.Command().connection
tlvdata.send(s, {'collective': {'operation': 'show'}})
res = tlvdata.recv(s)
if 'error' in res:
print(res['error'])
return
if 'error' in res['collective']:
print(res['collective']['error'])
return

View File

@@ -32,7 +32,7 @@ import confluent.main
import multiprocessing
if __name__ == '__main__':
multiprocessing.freeze_support()
confluent.main.run()
confluent.main.run(sys.argv)
#except:
# pass
#p.disable()

View File

@@ -7,7 +7,7 @@ import tempfile
def get_openssl_conf_location():
if exists('/etc/pki/tls/openssl.cnf'):
return '/etc/pki/tls/openssl.cnf'
elif exists('/etc/ssl/openssl.cnf');
elif exists('/etc/ssl/openssl.cnf'):
return '/etc/ssl/openssl.cnf'
else:
raise Exception("Cannot find openssl config file")

View File

@@ -3,8 +3,12 @@ cd `dirname $0`
PKGNAME=$(basename $(pwd))
DPKGNAME=$(basename $(pwd) | sed -e s/_/-/)
OPKGNAME=$(basename $(pwd) | sed -e s/_/-/)
PYEXEC=python3
DSCARGS="--with-python3=True --with-python2=False"
if grep wheezy /etc/os-release; then
DPKGNAME=python-$DPKGNAME
PYEXEC=python
DSCARGS=""
fi
cd ..
mkdir -p /tmp/confluent # $DPKGNAME
@@ -24,15 +28,15 @@ install-scripts=/opt/confluent/bin
package=$DPKGNAME
EOF
python setup.py sdist > /dev/null 2>&1
py2dsc dist/*.tar.gz
$PYEXEC setup.py sdist > /dev/null 2>&1
py2dsc $DSCARGS dist/*.tar.gz
shopt -s extglob
cd deb_dist/!(*.orig)/
if [ "$OPKGNAME" = "confluent-server" ]; then
if grep wheezy /etc/os-release; then
sed -i 's/^\(Depends:.*\)/\1, python-confluent-client, python-lxml, python-eficompressor, python-pycryptodomex, python-dateutil, python-pyopenssl/' debian/control
else
sed -i 's/^\(Depends:.*\)/\1, confluent-client, python-lxml, python-eficompressor, python-pycryptodome, python-dateutil/' debian/control
sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket/' debian/control
fi
if grep wheezy /etc/os-release; then
echo 'confluent_client python-confluent-client' >> debian/pydist-overrides

View File

@@ -27,6 +27,8 @@ from fnmatch import fnmatch
import hashlib
import hmac
import multiprocessing
import os
import pwd
import confluent.userutil as userutil
import confluent.util as util
pam = None
@@ -58,9 +60,9 @@ _allowedbyrole = {
'/node*/configuration/*',
],
'update': [
'/discovery/*',
'/discovery/*',
'/networking/macs/rescan',
'/node*/power/state',
'/node*/power/state',
'/node*/power/reseat',
'/node*/attributes/*',
'/node*/media/*tach',
@@ -98,17 +100,6 @@ _deniedbyrole = {
}
def _prune_passcache():
# This function makes sure we don't remember a passphrase in memory more
# than 10 seconds
while True:
curtime = time.time()
for passent in _passcache.iterkeys():
if passent[2] < curtime - 90:
del _passcache[passent]
eventlet.sleep(90)
def _get_usertenant(name, tenant=False):
"""_get_usertenant
@@ -268,12 +259,36 @@ def check_user_passphrase(name, passphrase, operation=None, element=None, tenant
_passcache[(user, tenant)] = hashlib.sha256(passphrase).digest()
return authorize(user, element, tenant, operation)
if pam:
pammy = pam.pam()
usergood = pammy.authenticate(user, passphrase)
del pammy
pwe = None
try:
pwe = pwd.getpwnam(user)
except KeyError:
#pam won't work if the user doesn't exist, don't go further
eventlet.sleep(0.05) # stall even on test for existence of a username
return None
if os.getuid() != 0:
# confluent is running with reduced privilege, however, pam_unix refuses
# to let a non-0 user check anothers password.
# We will fork and the child will assume elevated privilege to
# get unix_chkpwd helper to enable checking /etc/shadow
pid = os.fork()
if not pid:
usergood = False
try:
# we change to the uid we are trying to authenticate as, because
# pam_unix uses unix_chkpwd which reque
os.setuid(pwe.pw_uid)
usergood = pam.authenticate(user, passphrase, service=_pamservice)
finally:
os._exit(0 if usergood else 1)
usergood = os.waitpid(pid, 0)[1] == 0
else:
# We are running as root, we don't need to fork in order to authenticate the
# user
usergood = pam.authenticate(user, passphrase, service=_pamservice)
if usergood:
_passcache[(user, tenant)] = hashlib.sha256(passphrase).digest()
return authorize(user, element, tenant, operation, skipuserobj=False)
return authorize(user, element, tenant, operation, skipuserobj=False)
eventlet.sleep(0.05) # stall even on test for existence of a username
return None

View File

@@ -73,20 +73,12 @@ def connect_to_leader(cert=None, name=None, leader=None):
with cfm._initlock:
banner = tlvdata.recv(remote) # the banner
vers = banner.split()[2]
pvers = 0
reqver = 4
if vers == b'v0':
pvers = 2
elif vers == b'v1':
pvers = 4
if sys.version_info[0] < 3:
pvers = 2
reqver = 2
if vers != b'v2':
raise Exception('This instance only supports protocol 2, synchronize versions between collective members')
tlvdata.recv(remote) # authpassed... 0..
if name is None:
name = get_myname()
tlvdata.send(remote, {'collective': {'operation': 'connect',
'protover': reqver,
'name': name,
'txcount': cfm._txcount}})
keydata = tlvdata.recv(remote)
@@ -160,15 +152,15 @@ def connect_to_leader(cert=None, name=None, leader=None):
raise
currentleader = leader
#spawn this as a thread...
follower = eventlet.spawn(follow_leader, remote, pvers)
follower = eventlet.spawn(follow_leader, remote, leader)
return True
def follow_leader(remote, proto):
def follow_leader(remote, leader):
global currentleader
cleanexit = False
try:
cfm.follow_channel(remote, proto)
cfm.follow_channel(remote)
except greenlet.GreenletExit:
cleanexit = True
finally:
@@ -176,8 +168,8 @@ def follow_leader(remote, proto):
log.log({'info': 'Previous following cleanly closed',
'subsystem': 'collective'})
return
log.log({'info': 'Current leader has disappeared, restarting '
'collective membership', 'subsystem': 'collective'})
log.log({'info': 'Current leader ({0}) has disappeared, restarting '
'collective membership'.format(leader), 'subsystem': 'collective'})
# The leader has folded, time to startup again...
cfm.stop_following()
currentleader = None
@@ -430,7 +422,6 @@ def handle_connection(connection, cert, request, local=False):
tlvdata.send(connection, collinfo)
if 'connect' == operation:
drone = request['name']
folver = request.get('protover', 2)
droneinfo = cfm.get_collective_member(drone)
if not (droneinfo and util.cert_matches(droneinfo['fingerprint'],
cert)):
@@ -479,7 +470,7 @@ def handle_connection(connection, cert, request, local=False):
connection.sendall(cfgdata)
#tlvdata.send(connection, {'tenants': 0}) # skip the tenants for now,
# so far unused anyway
if not cfm.relay_slaved_requests(drone, connection, folver):
if not cfm.relay_slaved_requests(drone, connection):
if not retrythread: # start a recovery if everyone else seems
# to have disappeared
retrythread = eventlet.spawn_after(30 + random.random(),

View File

@@ -280,7 +280,7 @@ node = {
'console.method': {
'description': ('Indicate the method used to access the console of '
'the managed node.'),
'validvalues': ('ssh', 'ipmi'),
'validvalues': ('ssh', 'ipmi', 'tsmsol'),
},
# 'virtualization.host': {
# 'description': ('Hypervisor where this node does/should reside'),

View File

@@ -71,6 +71,7 @@ import eventlet.green.select as select
import eventlet.green.threading as gthread
import fnmatch
import json
import msgpack
import operator
import os
import random
@@ -101,10 +102,6 @@ _cfgstore = None
_pendingchangesets = {}
_txcount = 0
_hasquorum = True
if sys.version_info[0] >= 3:
lowestver = 4
else:
lowestver = 2
_attraliases = {
'bmc': 'hardwaremanagement.manager',
@@ -317,8 +314,8 @@ def exec_on_leader(function, *args):
while xid in _pendingchangesets:
xid = confluent.util.stringify(base64.b64encode(os.urandom(8)))
_pendingchangesets[xid] = event.Event()
rpcpayload = cPickle.dumps({'function': function, 'args': args,
'xid': xid}, protocol=cfgproto)
rpcpayload = msgpack.packb({'function': function, 'args': args,
'xid': xid}, use_bin_type=False)
rpclen = len(rpcpayload)
cfgleader.sendall(struct.pack('!Q', rpclen))
cfgleader.sendall(rpcpayload)
@@ -328,6 +325,11 @@ def exec_on_leader(function, *args):
def exec_on_followers(fnname, *args):
pushes = eventlet.GreenPool()
# Check health of collective prior to attempting
for _ in pushes.starmap(
_push_rpc, [(cfgstreams[s], b'') for s in cfgstreams]):
pass
if len(cfgstreams) < (len(_cfgstore['collective']) // 2):
# the leader counts in addition to registered streams
raise exc.DegradedCollective()
@@ -338,8 +340,8 @@ def exec_on_followers_unconditional(fnname, *args):
global _txcount
pushes = eventlet.GreenPool()
_txcount += 1
payload = cPickle.dumps({'function': fnname, 'args': args,
'txcount': _txcount}, protocol=lowestver)
payload = msgpack.packb({'function': fnname, 'args': args,
'txcount': _txcount}, use_bin_type=False)
for _ in pushes.starmap(
_push_rpc, [(cfgstreams[s], payload) for s in cfgstreams]):
pass
@@ -386,9 +388,15 @@ def init_masterkey(password=None, autogen=True):
def _push_rpc(stream, payload):
with _rpclock:
stream.sendall(struct.pack('!Q', len(payload)))
if len(payload):
stream.sendall(payload)
try:
stream.sendall(struct.pack('!Q', len(payload)))
if len(payload):
stream.sendall(payload)
return True
except Exception:
logException()
del cfgstreams[stream]
stream.close()
def decrypt_value(cryptvalue,
@@ -554,14 +562,9 @@ def set_global(globalname, value, sync=True):
ConfigManager._bg_sync_to_file()
cfgstreams = {}
def relay_slaved_requests(name, listener, vers):
def relay_slaved_requests(name, listener):
global cfgleader
global _hasquorum
global lowestver
if vers > 2 and sys.version_info[0] < 3:
vers = 2
if vers < lowestver:
lowestver = vers
pushes = eventlet.GreenPool()
if name not in _followerlocks:
_followerlocks[name] = gthread.RLock()
@@ -578,11 +581,18 @@ def relay_slaved_requests(name, listener, vers):
lh = StreamHandler(listener)
_hasquorum = len(cfgstreams) >= (
len(_cfgstore['collective']) // 2)
payload = cPickle.dumps({'quorum': _hasquorum}, protocol=lowestver)
for _ in pushes.starmap(
_push_rpc,
[(cfgstreams[s], payload) for s in cfgstreams]):
pass
_newquorum = None
while _hasquorum != _newquorum:
if _newquorum is not None:
_hasquorum = _newquorum
payload = msgpack.packb({'quorum': _hasquorum}, use_bin_type=False)
for _ in pushes.starmap(
_push_rpc,
[(cfgstreams[s], payload) for s in cfgstreams]):
pass
_newquorum = len(cfgstreams) >= (
len(_cfgstore['collective']) // 2)
_hasquorum = _newquorum
if _hasquorum and _pending_collective_updates:
apply_pending_collective_updates()
msg = lh.get_next_msg()
@@ -597,15 +607,21 @@ def relay_slaved_requests(name, listener, vers):
if not nrpc:
raise Exception('Truncated client error')
rpc += nrpc
rpc = cPickle.loads(rpc)
rpc = msgpack.unpackb(rpc, raw=False)
exc = None
if not (rpc['function'].startswith('_rpc_') or rpc['function'].endswith('_collective_member')):
raise Exception('Unsupported function {0} called'.format(rpc['function']))
try:
globals()[rpc['function']](*rpc['args'])
except ValueError as ve:
exc = ['ValueError', str(ve)]
except Exception as e:
exc = e
exc = ['Exception', str(e)]
if 'xid' in rpc:
_push_rpc(listener, cPickle.dumps({'xid': rpc['xid'],
'exc': exc}, protocol=vers))
res = _push_rpc(listener, msgpack.packb({'xid': rpc['xid'],
'exc': exc}, use_bin_type=False))
if not res:
break
try:
msg = lh.get_next_msg()
except Exception:
@@ -622,7 +638,7 @@ def relay_slaved_requests(name, listener, vers):
if cfgstreams:
_hasquorum = len(cfgstreams) >= (
len(_cfgstore['collective']) // 2)
payload = cPickle.dumps({'quorum': _hasquorum}, protocol=lowestver)
payload = msgpack.packb({'quorum': _hasquorum}, use_bin_type=False)
for _ in pushes.starmap(
_push_rpc,
[(cfgstreams[s], payload) for s in cfgstreams]):
@@ -650,7 +666,9 @@ class StreamHandler(object):
if confluent.util.monotonic_time() > self.expiry:
return None
if confluent.util.monotonic_time() > self.keepalive:
_push_rpc(self.sock, b'') # nulls are a keepalive
res = _push_rpc(self.sock, b'') # nulls are a keepalive
if not res:
return None
self.keepalive = confluent.util.monotonic_time() + 20
self.expiry = confluent.util.monotonic_time() + 60
msg = self.sock.recv(8)
@@ -662,19 +680,15 @@ class StreamHandler(object):
self.sock = None
def stop_following(replacement=None, proto=2):
def stop_following(replacement=None):
with _leaderlock:
global cfgleader
global cfgproto
if cfgleader and not isinstance(cfgleader, bool):
try:
cfgleader.close()
except Exception:
pass
cfgleader = replacement
if proto > 2 and sys.version_info[0] < 3:
proto = 2
cfgproto = proto
def stop_leading():
for stream in list(cfgstreams):
@@ -732,15 +746,14 @@ def commit_clear():
ConfigManager._bg_sync_to_file()
cfgleader = None
cfgproto = 2
def follow_channel(channel, proto=2):
def follow_channel(channel):
global _txcount
global _hasquorum
try:
stop_leading()
stop_following(channel, proto)
stop_following(channel)
lh = StreamHandler(channel)
msg = lh.get_next_msg()
while msg:
@@ -752,22 +765,31 @@ def follow_channel(channel, proto=2):
if not nrpc:
raise Exception('Truncated message error')
rpc += nrpc
rpc = cPickle.loads(rpc)
rpc = msgpack.unpackb(rpc, raw=False)
if 'txcount' in rpc:
_txcount = rpc['txcount']
if 'function' in rpc:
if not (rpc['function'].startswith('_true') or rpc['function'].startswith('_rpc')):
raise Exception("Received unsupported function call: {0}".format(rpc['function']))
try:
globals()[rpc['function']](*rpc['args'])
except Exception as e:
print(repr(e))
if 'xid' in rpc and rpc['xid']:
if rpc.get('exc', None):
_pendingchangesets[rpc['xid']].send_exception(rpc['exc'])
exctype, excstr = rpc['exc']
if exctype == 'ValueError':
exc = ValueError(excstr)
else:
exc = Exception(excstr)
_pendingchangesets[rpc['xid']].send_exception(exc)
else:
_pendingchangesets[rpc['xid']].send()
if 'quorum' in rpc:
_hasquorum = rpc['quorum']
_push_rpc(channel, b'') # use null as ACK
res = _push_rpc(channel, b'') # use null as ACK
if not res:
break
msg = lh.get_next_msg()
finally:
# mark the connection as broken
@@ -1137,6 +1159,8 @@ class ConfigManager(object):
attribute, match = expression.split('=')
else:
raise Exception('Invalid Expression')
if attribute.startswith('secret.'):
raise Exception('Filter by secret attributes is not supported')
for node in nodes:
try:
currvals = [self._cfgstore['nodes'][node][attribute]['value']]
@@ -1381,7 +1405,7 @@ class ConfigManager(object):
return exec_on_leader('_rpc_master_set_user', self.tenant, name,
attributemap)
if cfgstreams:
exec_on_followers('_rpc_set_user', self.tenant, name)
exec_on_followers('_rpc_set_user', self.tenant, name, attributemap)
self._true_set_user(name, attributemap)
def _true_set_user(self, name, attributemap):
@@ -1461,9 +1485,10 @@ class ConfigManager(object):
self._cfgstore['users'][name]['displayname'] = displayname
_cfgstore['main']['idmap'][uid] = {
'tenant': self.tenant,
'username': name
'username': name,
'role': role,
}
if attributemap is not None:
if attributemap:
self._true_set_user(name, attributemap)
_mark_dirtykey('users', name, self.tenant)
_mark_dirtykey('idmap', uid)
@@ -1626,6 +1651,7 @@ class ConfigManager(object):
if group in self._cfgstore['nodes'][node]['groups']:
self._cfgstore['nodes'][node]['groups'].remove(group)
self._node_removed_from_group(node, group, changeset)
_mark_dirtykey('nodes', node, self.tenant)
for node in nodes:
if node not in self._cfgstore['nodes']:
self._cfgstore['nodes'][node] = {'groups': [group]}
@@ -1867,6 +1893,8 @@ class ConfigManager(object):
eventlet.spawn_n(_do_notifier, self, watcher, callback)
def del_nodes(self, nodes):
if isinstance(nodes, set):
nodes = list(nodes) # msgpack can't handle set
if cfgleader: # slaved to a collective
return exec_on_leader('_rpc_master_del_nodes', self.tenant,
nodes)

View File

@@ -362,13 +362,17 @@ class ConsoleHandler(object):
if self.reconnect:
self.reconnect.cancel()
self.reconnect = None
strerror = ('The console.method attribute for this node is '
'not configured,\r\nset it to a valid value for console '
'function')
try:
self._console = list(plugin.handle_path(
self._plugin_path.format(self.node),
"create", self.cfgmgr))[0]
except (exc.NotImplementedException, exc.NotFoundException):
self._console = None
except:
except Exception as e:
strerror = str(e)
if _tracelog:
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
@@ -381,13 +385,9 @@ class ConsoleHandler(object):
self._send_rcpts({'connectstate': self.connectstate,
'error': self.error})
self.feedbuffer(
'\x1bc\x1b[2J\x1b[1;1H[The console.method attribute for this node is '
'not configured,\r\nset it to a valid value for console '
'function]')
'\x1bc\x1b[2J\x1b[1;1H[{0}]'.format(strerror))
self._send_rcpts(
'\x1bc\x1b[2J\x1b[1;1H[The console.method attribute for this node is '
'not configured,\r\nset it to a valid value for console '
'function]')
'\x1bc\x1b[2J\x1b[1;1H[{0}]'.format(strerror))
self.clearerror = True
return
if self.clearerror:

View File

@@ -60,19 +60,14 @@ import eventlet.greenpool as greenpool
import eventlet.green.ssl as ssl
import eventlet.queue as queue
import itertools
import msgpack
import os
try:
import cPickle as pickle
pargs = {}
except ImportError:
import pickle
pargs = {'encoding': 'utf-8'}
import socket
import struct
import sys
pluginmap = {}
dispatch_plugins = (b'ipmi', u'ipmi')
dispatch_plugins = (b'ipmi', u'ipmi', b'redfish', u'redfish', b'tsmsol', u'tsmsol')
def seek_element(currplace, currkey):
@@ -221,10 +216,14 @@ def _init_core():
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'extra': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
'advanced': PluginRoute({
'pluginattrs': ['hardwaremanagement.method'],
'default': 'ipmi',
}),
}),
},
},
'storage': {
@@ -417,18 +416,22 @@ def create_user(inputdata, configmanager):
try:
username = inputdata['name']
del inputdata['name']
role = inputdata['role']
del inputdata['role']
except (KeyError, ValueError):
raise exc.InvalidArgumentException()
configmanager.create_user(username, attributemap=inputdata)
raise exc.InvalidArgumentException('Missing user name or role')
configmanager.create_user(username, role, attributemap=inputdata)
def create_usergroup(inputdata, configmanager):
try:
groupname = inputdata['name']
role = inputdata['role']
del inputdata['name']
del inputdata['role']
except (KeyError, ValueError):
raise exc.InvalidArgumentException()
configmanager.create_usergroup(groupname)
raise exc.InvalidArgumentException("Missing user name or role")
configmanager.create_usergroup(groupname, role)
def update_usergroup(groupname, attribmap, configmanager):
@@ -689,16 +692,22 @@ def handle_dispatch(connection, cert, dispatch, peername):
cfm.get_collective_member(peername)['fingerprint'], cert):
connection.close()
return
pversion = 0
if bytearray(dispatch)[0] == 0x80:
pversion = bytearray(dispatch)[1]
dispatch = pickle.loads(dispatch, **pargs)
if dispatch[0:2] != b'\x01\x03': # magic value to indicate msgpack
# We only support msgpack now
# The magic should preclude any pickle, as the first byte can never be
# under 0x20 or so.
connection.close()
return
dispatch = msgpack.unpackb(dispatch[2:], raw=False)
configmanager = cfm.ConfigManager(dispatch['tenant'])
nodes = dispatch['nodes']
inputdata = dispatch['inputdata']
operation = dispatch['operation']
pathcomponents = dispatch['path']
routespec = nested_lookup(noderesources, pathcomponents)
inputdata = msg.get_input_message(
pathcomponents, operation, inputdata, nodes, dispatch['isnoderange'],
configmanager)
plugroute = routespec.routeinfo
plugpath = None
nodesbyhandler = {}
@@ -728,18 +737,26 @@ def handle_dispatch(connection, cert, dispatch, peername):
configmanager=configmanager,
inputdata=inputdata))
for res in itertools.chain(*passvalues):
_forward_rsp(connection, res, pversion)
_forward_rsp(connection, res)
except Exception as res:
_forward_rsp(connection, res, pversion)
_forward_rsp(connection, res)
connection.sendall('\x00\x00\x00\x00\x00\x00\x00\x00')
def _forward_rsp(connection, res, pversion):
def _forward_rsp(connection, res):
try:
r = pickle.dumps(res, protocol=pversion)
except TypeError:
r = pickle.dumps(Exception(
'Cannot serialize error, check collective.manager error logs for details' + str(res)), protocol=pversion)
r = res.serialize()
except AttributeError:
if isinstance(res, Exception):
r = msgpack.packb(['Exception', str(res)], use_bin_type=False)
else:
r = msgpack.packb(
['Exception', 'Unable to serialize response ' + repr(res)],
use_bin_type=False)
except Exception as e:
r = msgpack.packb(
['Exception', 'Unable to serialize response ' + repr(res) + ' due to ' + str(e)],
use_bin_type=False)
rlen = len(r)
if not rlen:
return
@@ -830,7 +847,7 @@ def handle_node_request(configmanager, inputdata, operation,
del pathcomponents[0:2]
passvalues = queue.Queue()
plugroute = routespec.routeinfo
inputdata = msg.get_input_message(
msginputdata = msg.get_input_message(
pathcomponents, operation, inputdata, nodes, isnoderange,
configmanager)
if 'handler' in plugroute: # fixed handler definition, easy enough
@@ -841,7 +858,7 @@ def handle_node_request(configmanager, inputdata, operation,
passvalue = hfunc(
nodes=nodes, element=pathcomponents,
configmanager=configmanager,
inputdata=inputdata)
inputdata=msginputdata)
if isnoderange:
return passvalue
elif isinstance(passvalue, console.Console):
@@ -894,13 +911,13 @@ def handle_node_request(configmanager, inputdata, operation,
workers.spawn(addtoqueue, passvalues, hfunc, {'nodes': nodesbyhandler[hfunc],
'element': pathcomponents,
'configmanager': configmanager,
'inputdata': inputdata})
'inputdata': msginputdata})
for manager in nodesbymanager:
numworkers += 1
workers.spawn(addtoqueue, passvalues, dispatch_request, {
'nodes': nodesbymanager[manager], 'manager': manager,
'element': pathcomponents, 'configmanager': configmanager,
'inputdata': inputdata, 'operation': operation})
'inputdata': inputdata, 'operation': operation, 'isnoderange': isnoderange})
if isnoderange or not autostrip:
return iterate_queue(numworkers, passvalues)
else:
@@ -944,7 +961,7 @@ def addtoqueue(theq, fun, kwargs):
def dispatch_request(nodes, manager, element, configmanager, inputdata,
operation):
operation, isnoderange):
a = configmanager.get_collective_member(manager)
try:
remote = socket.create_connection((a['address'], 13001))
@@ -978,10 +995,10 @@ def dispatch_request(nodes, manager, element, configmanager, inputdata,
pvers = 2
tlvdata.recv(remote)
myname = collective.get_myname()
dreq = pickle.dumps({'name': myname, 'nodes': list(nodes),
'path': element,'tenant': configmanager.tenant,
'operation': operation, 'inputdata': inputdata},
protocol=pvers)
dreq = b'\x01\x03' + msgpack.packb(
{'name': myname, 'nodes': list(nodes),
'path': element,'tenant': configmanager.tenant,
'operation': operation, 'inputdata': inputdata, 'isnoderange': isnoderange}, use_bin_type=False)
tlvdata.send(remote, {'dispatch': {'name': myname, 'length': len(dreq)}})
remote.sendall(dreq)
while True:
@@ -1029,11 +1046,13 @@ def dispatch_request(nodes, manager, element, configmanager, inputdata,
return
rsp += nrsp
try:
rsp = pickle.loads(rsp, **pargs)
except UnicodeDecodeError:
rsp = pickle.loads(rsp, encoding='latin1')
rsp = msg.msg_deserialize(rsp)
except Exception:
rsp = exc.deserialize_exc(rsp)
if isinstance(rsp, Exception):
raise rsp
if not rsp:
raise Exception('Error in cross-collective serialize/deserialze, see remote logs')
yield rsp

View File

@@ -107,6 +107,8 @@ nodehandlers = {
'service:management-hardware.Lenovo:lenovo-xclarity-controller': xcc,
'service:management-hardware.IBM:integrated-management-module2': imm,
'pxe-client': pxeh,
'onie-switch': None,
'cumulus-switch': None,
'service:io-device.Lenovo:management-module': None,
'service:thinkagile-storage': cpstorage,
'service:lenovo-tsm': tsm,
@@ -114,6 +116,8 @@ nodehandlers = {
servicenames = {
'pxe-client': 'pxe-client',
'onie-switch': 'onie-switch',
'cumulus-switch': 'cumulus-switch',
'service:lenovo-smm': 'lenovo-smm',
'service:management-hardware.Lenovo:lenovo-xclarity-controller': 'lenovo-xcc',
'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2',
@@ -124,6 +128,8 @@ servicenames = {
servicebyname = {
'pxe-client': 'pxe-client',
'onie-switch': 'onie-switch',
'cumulus-switch': 'cumulus-switch',
'lenovo-smm': 'service:lenovo-smm',
'lenovo-xcc': 'service:management-hardware.Lenovo:lenovo-xclarity-controller',
'lenovo-imm2': 'service:management-hardware.IBM:integrated-management-module2',
@@ -951,9 +957,9 @@ def eval_node(cfg, handler, info, nodename, manual=False):
# raise exc.InvalidArgumentException(errorstr)
# log.log({'error': errorstr})
if encuuid in pending_by_uuid:
pending_by_uuid[encuuid].add(info)
pending_by_uuid[encuuid].append(info)
else:
pending_by_uuid[encuuid] = set([info])
pending_by_uuid[encuuid] = [info]
return
# We found the real smm, replace the list with the actual smm
# to continue
@@ -1096,6 +1102,10 @@ def discover_node(cfg, handler, info, nodename, manual):
info['discostatus'] = 'discovered'
for i in pending_by_uuid.get(curruuid, []):
eventlet.spawn_n(_recheck_single_unknown_info, cfg, i)
try:
del pending_by_uuid[curruuid]
except KeyError:
pass
return True
log.log({'info': 'Detected {0}, but discovery.policy is not set to a '
'value allowing discovery (open or permissive)'.format(

View File

@@ -68,17 +68,21 @@ class NodeHandler(object):
def _savecert(self, certificate):
self._fp = certificate
return True
def get_node_credentials(self, nodename, creds, defuser, defpass):
user = creds.get(nodename, {}).get(
'secret.hardwaremanagementuser', {}).get('value', None)
havecustomcreds = False
if user and not isinstance(user, str):
user = user.decode('utf8')
if user is not None and user != defuser:
havecustomcreds = True
else:
user = defuser
passwd = creds.get(nodename, {}).get(
'secret.hardwaremanagementpassword', {}).get('value', None)
if passwd and not isinstance(passwd, str):
passwd = passwd.decode('utf8')
if passwd is not None and passwd != defpass:
havecustomcreds = True
else:

View File

@@ -98,7 +98,7 @@ class NodeHandler(bmchandler.NodeHandler):
setdata += ',v4Gateway:{0}'.format(gateway)
wc.request('POST', '/data', setdata)
rsp = wc.getresponse()
rspdata = rsp.read()
rspdata = util.stringify(rsp.read())
if '<statusCode>0' not in rspdata:
raise Exception("Error configuring SMM Network")
return
@@ -145,7 +145,7 @@ class NodeHandler(bmchandler.NodeHandler):
authdata['password'] = password
wc.request('POST', '/data/login', urlencode(authdata), headers)
rsp = wc.getresponse()
rspdata = rsp.read()
rspdata = util.stringify(rsp.read())
if 'authResult>0' in rspdata:
tokens = fromstring(rspdata)
st2 = tokens.findall('st2')[0].text
@@ -181,6 +181,10 @@ class NodeHandler(bmchandler.NodeHandler):
'secret.hardwaremanagementuser', {}).get('value', 'USERID')
passwd = creds.get(nodename, {}).get(
'secret.hardwaremanagementpassword', {}).get('value', 'PASSW0RD')
if not isinstance(username, str):
username = username.decode('utf8')
if not isinstance(passwd, str):
passwd = passwd.decode('utf8')
if passwd == 'PASSW0RD' and self.ruleset:
raise Exception('Cannot support default password and setting password rules at same time')
if passwd == 'PASSW0RD':

View File

@@ -22,6 +22,7 @@ try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
getaddrinfo = eventlet.support.greendns.getaddrinfo
webclient = eventlet.import_patched('pyghmi.util.webclient')
@@ -43,6 +44,13 @@ class NodeHandler(generic.NodeHandler):
self.atdefault = True
super(NodeHandler, self).__init__(info, configmanager)
def scan(self):
c = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert)
i = c.grab_json_response('/redfish/v1/')
uuid = i.get('UUID', None)
if uuid:
self.info['uuid'] = uuid
def validate_cert(self, certificate):
# broadly speaking, merely checks consistency moment to moment,
# but if https_cert gets stricter, this check means something
@@ -54,9 +62,17 @@ class NodeHandler(generic.NodeHandler):
'username': self.DEFAULT_USER,
'password': self.DEFAULT_PASS,
}
wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert)
wc.set_header('Content-Type', 'application/json')
authmode = 0
if not self.trieddefault:
wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert)
rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata))
rsp, status = wc.grab_json_response_with_status('/api/session', authdata)
if status == 403:
wc.set_header('Content-Type', 'application/x-www-form-urlencoded')
authmode = 1
rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata))
else:
authmode = 2
if status > 400:
rsp = util.stringify(rsp)
self.trieddefault = True
@@ -68,9 +84,15 @@ class NodeHandler(generic.NodeHandler):
'default_password': self.DEFAULT_PASS,
'username': self.DEFAULT_USER
}
rsp, status = wc.grab_json_response_with_status('/api/reset-pass', urlencode(passchange))
if authmode == 2:
rsp, status = wc.grab_json_response_with_status('/api/reset-pass', passchange)
else:
rsp, status = wc.grab_json_response_with_status('/api/reset-pass', urlencode(passchange))
authdata['password'] = self.targpass
rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata))
if authmode == 2:
rsp, status = wc.grab_json_response_with_status('/api/session', authdata)
else:
rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata))
self.csrftok = rsp['CSRFToken']
self.channel = rsp['channel']
self.curruser = self.DEFAULT_USER
@@ -85,15 +107,23 @@ class NodeHandler(generic.NodeHandler):
if self.curruser:
authdata['username'] = self.curruser
authdata['password'] = self.currpass
rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata))
if rsp.status != 200:
if authmode != 1:
rsp, status = wc.grab_json_response_with_status('/api/session', authdata)
if authmode == 1 or status == 403:
wc.set_header('Content-Type', 'application/x-www-form-urlencoded')
rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata))
if status != 200:
return None
self.csrftok = rsp['CSRFToken']
self.channel = rsp['channel']
return wc
authdata['username'] = self.targuser
authdata['password'] = self.targpass
rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata))
if authmode != 1:
rsp, status = wc.grab_json_response_with_status('/api/session', authdata)
if authmode == 1 or status == 403:
wc.set_header('Content-Type', 'application/x-www-form-urlencoded')
rsp, status = wc.grab_json_response_with_status('/api/session', urlencode(authdata))
if status != 200:
return None
self.curruser = self.targuser

View File

@@ -15,6 +15,7 @@
import base64
import codecs
import confluent.discovery.handlers.imm as immhandler
import confluent.exceptions as exc
import confluent.netutil as netutil
import confluent.util as util
import errno
@@ -95,7 +96,8 @@ class NodeHandler(immhandler.NodeHandler):
ipmicmd.xraw_command(netfn=0x3a, command=0xf1, data=(1,))
except pygexc.IpmiException as e:
if (e.ipmicode != 193 and 'Unauthorized name' not in str(e) and
'Incorrect password' not in str(e)):
'Incorrect password' not in str(e) and
str(e) != 'Session no longer connected'):
# raise an issue if anything other than to be expected
if disableipmi:
_, _ = wc.grab_json_response_with_status(
@@ -170,7 +172,12 @@ class NodeHandler(immhandler.NodeHandler):
pwdchanged = True
if '_csrf_token' in wc.cookies:
wc.set_header('X-XSRF-TOKEN', wc.cookies['_csrf_token'])
if pwdchanged:
# Remove the minimum change interval, to allow sane
# password changes after provisional changes
self.set_password_policy('')
return (wc, pwdchanged)
return (None, None)
@property
def wc(self):
@@ -204,7 +211,7 @@ class NodeHandler(immhandler.NodeHandler):
# however the target *will* demand a new password... if it's currently
# PASSW0RD
# use TempW0rd42 to avoid divulging a real password on the line
# This is replacing one well known password (PASSW0RD) with another
# This is replacing one well known password (PASSW0RD) with another
# (TempW0rd42)
passwd = 'TempW0rd42'
wc, pwdchanged = self.get_webclient('USERID', 'PASSW0RD', passwd)
@@ -226,9 +233,9 @@ class NodeHandler(immhandler.NodeHandler):
if wc:
return wc
def set_password_policy(self):
def set_password_policy(self, strruleset):
ruleset = {'USER_GlobalMinPassChgInt': '0'}
for rule in self.ruleset.split(','):
for rule in strruleset.split(','):
if '=' not in rule:
continue
name, value = rule.split('=')
@@ -350,15 +357,18 @@ class NodeHandler(immhandler.NodeHandler):
# between hypothetical secure path and today.
dpp = self.configmanager.get_node_attributes(
nodename, 'discovery.passwordrules')
self.ruleset = dpp.get(nodename, {}).get(
strruleset = dpp.get(nodename, {}).get(
'discovery.passwordrules', {}).get('value', '')
wc = self.wc
creds = self.configmanager.get_node_attributes(
self.nodename, ['secret.hardwaremanagementuser',
'secret.hardwaremanagementpassword'], decrypt=True)
user, passwd, isdefault = self.get_node_credentials(nodename, creds, 'USERID', 'PASSW0RD')
self.set_password_policy()
self.set_password_policy(strruleset)
if self._atdefaultcreds:
if isdefault and self.tmppasswd:
raise Exception(
'Request to use default credentials, but refused by target after it has been changed to {0}'.format(self.tmppasswd))
if not isdefault:
self._setup_xcc_account(user, passwd, wc)
self._convert_sha256account(user, passwd, wc)

View File

@@ -30,9 +30,16 @@ pxearchs = {
'\x00\x07': 'uefi-x64',
'\x00\x09': 'uefi-x64',
'\x00\x0b': 'uefi-aarch64',
'\x00\x10': 'uefi-httpboot',
}
def stringify(value):
    """Coerce a bytes-like DHCP field into a text string.

    On Python 2, bytes is str, so the raw value is returned unchanged;
    on Python 3 the bytes are decoded as UTF-8.
    """
    raw = bytes(value)
    if isinstance(raw, str):
        return raw
    return raw.decode('utf8')
def decode_uuid(rawguid):
lebytes = struct.unpack_from('<IHH', rawguid[:8])
bebytes = struct.unpack_from('>HHI', rawguid[8:])
@@ -40,23 +47,50 @@ def decode_uuid(rawguid):
lebytes[0], lebytes[1], lebytes[2], bebytes[0], bebytes[1], bebytes[2]).lower()
def _decode_ocp_vivso(rq, idx, size):
end = idx + size
vivso = {'service-type': 'onie-switch'}
while idx < end:
if rq[idx] == 3:
vivso['machine'] = stringify(rq[idx + 2:idx + 2 + rq[idx + 1]])
elif rq[idx] == 4:
vivso['arch'] = stringify(rq[idx + 2:idx + 2 + rq[idx + 1]])
elif rq[idx] == 5:
vivso['revision'] = stringify(rq[idx + 2:idx + 2 + rq[idx + 1]])
idx += rq[idx + 1] + 2
return '', None, vivso
def find_info_in_options(rq, optidx):
uuid = None
arch = None
vivso = None
ztpurlrequested = False
iscumulus = False
try:
while uuid is None or arch is None:
if rq[optidx] == 53: # DHCP message type
# we want only length 1 and only discover (type 1)
if rq[optidx + 1] != 1 or rq[optidx + 2] != 1:
return uuid, arch
return uuid, arch, vivso
optidx += 3
elif rq[optidx] == 55:
if 239 in rq[optidx + 2:optidx + 2 + rq[optidx + 1]]:
ztpurlrequested = True
optidx += rq[optidx + 1] + 2
elif rq[optidx] == 60:
vci = stringify(rq[optidx + 2:optidx + 2 + rq[optidx + 1]])
if vci.startswith('cumulus-linux'):
iscumulus = True
arch = vci.replace('cumulus-linux', '').strip()
optidx += rq[optidx + 1] + 2
elif rq[optidx] == 97:
if rq[optidx + 1] != 17:
# 16 bytes of uuid and one reserved byte
return uuid, arch
return uuid, arch, vivso
if rq[optidx + 2] != 0: # the reserved byte should be zero,
# anything else would be a new spec that we don't know yet
return uuid, arch
return uuid, arch, vivso
uuid = decode_uuid(rq[optidx + 3:optidx + 19])
optidx += 19
elif rq[optidx] == 93:
@@ -66,11 +100,20 @@ def find_info_in_options(rq, optidx):
if archraw in pxearchs:
arch = pxearchs[archraw]
optidx += 4
elif rq[optidx] == 125:
#vivso = rq[optidx + 2:optidx + 2 + rq[optidx + 1]]
if rq[optidx + 2:optidx + 6] == b'\x00\x00\xa6\x7f': # OCP
return _decode_ocp_vivso(rq, optidx + 7, rq[optidx + 6])
optidx += rq[optidx + 1] + 2
else:
optidx += rq[optidx + 1] + 2
except IndexError:
return uuid, arch
return uuid, arch
pass
if not vivso and iscumulus and ztpurlrequested:
if not uuid:
uuid = ''
vivso = {'service-type': 'cumulus-switch', 'arch': arch}
return uuid, arch, vivso
def snoop(handler, protocol=None):
#TODO(jjohnson2): ipv6 socket and multicast for DHCPv6, should that be
@@ -101,7 +144,14 @@ def snoop(handler, protocol=None):
optidx = rq.index(b'\x63\x82\x53\x63') + 4
except ValueError:
continue
uuid, arch = find_info_in_options(rq, optidx)
uuid, arch, vivso = find_info_in_options(rq, optidx)
if vivso:
# info['modelnumber'] = info['attributes']['enclosure-machinetype-model'][0]
handler({'hwaddr': netaddr, 'uuid': uuid,
'architecture': vivso.get('arch', ''),
'services': (vivso['service-type'],),
'attributes': {'enclosure-machinetype-model': [vivso.get('machine', '')]}})
continue
if uuid is None:
continue
# We will fill out service to have something to byte into,

View File

@@ -84,6 +84,8 @@ def _parse_SrvRply(parsed):
:return:
"""
payload = parsed['payload']
if len(payload) < 4:
return
ecode, ucount = struct.unpack('!HH', bytes(payload[0:4]))
if ecode:
parsed['errorcode'] = ecode
@@ -234,13 +236,20 @@ def _find_srvtype(net, net4, srvtype, addresses, xid):
def _grab_rsps(socks, rsps, interval, xidmap):
r, _, _ = select.select(socks, (), (), interval)
r = None
res = select.select(socks, (), (), interval)
if res:
r = res[0]
while r:
for s in r:
(rsp, peer) = s.recvfrom(9000)
neighutil.refresh_neigh()
_parse_slp_packet(rsp, peer, rsps, xidmap)
r, _, _ = select.select(socks, (), (), interval)
res = select.select(socks, (), (), interval)
if not res:
r = None
else:
r = res[0]
@@ -560,7 +569,7 @@ def scan(srvtypes=_slp_services, addresses=None, localonly=False):
# now to analyze and flesh out the responses
for id in rsps:
if 'service:ipmi' in rsps[id]['services']:
if 'service:ipmi://Athena:623' in rsps[id]['urls']:
if 'service:ipmi://Athena:623' in rsps[id].get('urls', ''):
rsps[id]['services'] = ['service:thinkagile-storage']
else:
continue

View File

@@ -17,7 +17,17 @@
import base64
import json
import msgpack
def deserialize_exc(msg):
    """Reconstruct an exception serialized by ConfluentException.serialize.

    The wire format is msgpack of [classname, args].  Unknown class names,
    or names that do not resolve to a ConfluentException subclass, degrade
    to a plain Exception rather than instantiating arbitrary globals.
    """
    excd = msgpack.unpackb(msg, raw=False)
    if excd[0] == 'Exception':
        # generic exceptions round-trip as a bare Exception
        return Exception(excd[1])
    if excd[0] not in globals():
        return Exception('Cannot deserialize: {0}'.format(repr(excd)))
    # Resolve the class before any checks: the name is just a string and
    # cannot be passed to issubclass() or called directly.  (The previous
    # code called globals(excd[0]), which is a TypeError, and ran
    # issubclass against the string itself.)
    excclass = globals()[excd[0]]
    if not issubclass(excclass, ConfluentException):
        return Exception('Cannot deserialize: {0}'.format(repr(excd)))
    return excclass(*excd[1])
class ConfluentException(Exception):
apierrorcode = 500
@@ -27,6 +37,10 @@ class ConfluentException(Exception):
errstr = ' - '.join((self._apierrorstr, str(self)))
return json.dumps({'error': errstr })
def serialize(self):
return msgpack.packb([self.__class__.__name__, [str(self)]],
use_bin_type=False)
@property
def apierrorstr(self):
if str(self):
@@ -104,16 +118,24 @@ class PubkeyInvalid(ConfluentException):
def __init__(self, text, certificate, fingerprint, attribname, event):
super(PubkeyInvalid, self).__init__(self, text)
self.myargs = (text, certificate, fingerprint, attribname, event)
self.fingerprint = fingerprint
self.attrname = attribname
self.message = text
certtxt = base64.b64encode(certificate)
if not isinstance(certtxt, str):
certtxt = certtxt.decode('utf8')
bodydata = {'message': text,
'event': event,
'fingerprint': fingerprint,
'fingerprintfield': attribname,
'certificate': base64.b64encode(certificate)}
'certificate': certtxt}
self.errorbody = json.dumps(bodydata)
def serialize(self):
return msgpack.packb([self.__class__.__name__, self.myargs],
use_bin_type=False)
def get_error_body(self):
return self.errorbody

View File

@@ -36,20 +36,40 @@ _tracelog = None
def execupdate(handler, filename, updateobj, type, owner, node):
global _tracelog
if type != 'ffdc' and not os.path.exists(filename):
errstr = '{0} does not appear to exist on {1}'.format(
filename, socket.gethostname())
updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
'detail': errstr})
return
if type != 'ffdc':
errstr = False
if not os.path.exists(filename):
errstr = '{0} does not appear to exist on {1}'.format(
filename, socket.gethostname())
elif not os.access(filename, os.R_OK):
errstr = '{0} is not readable by confluent on {1} (ensure confluent user or group can access file and parent directories)'.format(
filename, socket.gethostname())
if errstr:
updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
'detail': errstr})
return
if type == 'ffdc' and os.path.isdir(filename):
filename += '/' + node + '.svcdata'
filename += '/' + node
if 'type' == 'ffdc':
errstr = False
if os.path.exists(filename):
errstr = '{0} already exists on {1}, cannot overwrite'.format(
filename, socket.gethostname())
elif not os.access(os.path.dirname(filename), os.W_OK):
errstr = '{0} directory not writable by confluent user/group on {1}, check the directory and parent directory ownership and permissions'.format(filename, socket.gethostname())
if errstr:
updateobj.handle_progress({'phase': 'error', 'progress': 0.0,
'detail': errstr})
return
try:
if type == 'firmware':
completion = handler(filename, progress=updateobj.handle_progress,
bank=updateobj.bank)
else:
completion = handler(filename, progress=updateobj.handle_progress)
if type == 'ffdc' and completion:
filename = completion
completion = None
if completion is None:
completion = 'complete'
if owner:

View File

@@ -77,13 +77,16 @@ def _daemonize():
print('confluent server starting as pid {0}'.format(thispid))
os._exit(0)
os.closerange(0, 2)
os.umask(63)
os.open(os.devnull, os.O_RDWR)
os.dup2(0, 1)
os.dup2(0, 2)
log.daemonized = True
def _redirectoutput():
os.umask(63)
sys.stdout = log.Logger('stdout', buffered=False)
sys.stderr = log.Logger('stderr', buffered=False)
log.daemonized = True
def _updatepidfile():
@@ -206,7 +209,7 @@ def setlimits():
pass
def run():
def run(args):
setlimits()
try:
signal.signal(signal.SIGUSR1, dumptrace)
@@ -232,7 +235,10 @@ def run():
except (OSError, IOError) as e:
print(repr(e))
sys.exit(1)
_daemonize()
if '-f' not in args:
_daemonize()
if '-o' not in args:
_redirectoutput()
if havefcntl:
_updatepidfile()
signal.signal(signal.SIGINT, terminate)

View File

@@ -24,6 +24,7 @@ import confluent.config.conf as cfgfile
from copy import deepcopy
from datetime import datetime
import confluent.util as util
import msgpack
import json
try:
@@ -76,7 +77,7 @@ def _htmlify_structure(indict):
if datum is None:
nd.append('')
else:
nd.append(datum)
nd.append(util.stringify(datum))
ret += ",".join(nd)
else:
for v in indict:
@@ -84,6 +85,13 @@ def _htmlify_structure(indict):
return ret + '</ul>'
def msg_deserialize(packed):
    """Rebuild a message object from its msgpack serialization.

    The payload is [classname, arg1, arg2, ...] as produced by
    ConfluentMessage.serialize / ConfluentNodeError.serialize.  Only
    classes from those two hierarchies may be instantiated.
    """
    m = msgpack.unpackb(packed, raw=False)
    cls = globals()[m[0]]
    # issubclass accepts a tuple of bases; one check covers both hierarchies
    if issubclass(cls, (ConfluentMessage, ConfluentNodeError)):
        return cls(*m[1:])
    raise Exception("Unknown shenanigans")
class ConfluentMessage(object):
apicode = 200
readonly = False
@@ -105,6 +113,15 @@ class ConfluentMessage(object):
jsonsnippet = json.dumps(datasource, sort_keys=True, separators=(',', ':'))[1:-1]
return jsonsnippet
def serialize(self):
msg = [self.__class__.__name__]
msg.extend(self.myargs)
return msgpack.packb(msg, use_bin_type=False)
@classmethod
def deserialize(cls, data):
return cls(*data)
def raw(self):
"""Return pythonic representation of the response.
@@ -138,6 +155,7 @@ class ConfluentMessage(object):
snippet = ""
for key in pairs:
val = pairs[key]
key = util.stringify(key)
value = self.defaultvalue
if isinstance(val, dict) and 'type' in val:
valtype = val['type']
@@ -210,6 +228,15 @@ class ConfluentNodeError(object):
self.node = node
self.error = errorstr
def serialize(self):
return msgpack.packb(
[self.__class__.__name__, self.node, self.error],
use_bin_type=False)
@classmethod
def deserialize(cls, data):
return cls(*data)
def raw(self):
return {'databynode': {self.node: {'errorcode': self.apicode,
'error': self.error}}}
@@ -220,7 +247,7 @@ class ConfluentNodeError(object):
def strip_node(self, node):
# NOTE(jjohnson2): For single node errors, raise exception to
# trigger what a developer of that medium would expect
raise Exception(self.error)
raise Exception('{0}: {1}'.format(self.node, self.error))
class ConfluentResourceUnavailable(ConfluentNodeError):
@@ -259,9 +286,9 @@ class ConfluentTargetNotFound(ConfluentNodeError):
class ConfluentTargetInvalidCredentials(ConfluentNodeError):
apicode = 502
def __init__(self, node):
def __init__(self, node, errstr='bad credentials'):
self.node = node
self.error = 'bad credentials'
self.error = errstr
def strip_node(self, node):
raise exc.TargetEndpointBadCredentials
@@ -270,6 +297,7 @@ class ConfluentTargetInvalidCredentials(ConfluentNodeError):
class DeletedResource(ConfluentMessage):
notnode = True
def __init__(self, resource):
self.myargs = [resource]
self.kvpairs = {'deleted': resource}
def strip_node(self, node):
@@ -281,6 +309,7 @@ class CreatedResource(ConfluentMessage):
readonly = True
def __init__(self, resource):
self.myargs = [resource]
self.kvpairs = {'created': resource}
def strip_node(self, node):
@@ -292,6 +321,7 @@ class RenamedResource(ConfluentMessage):
readonly = True
def __init__(self, oldname, newname):
self.myargs = (oldname, newname)
self.kvpairs = {'oldname': oldname, 'newname': newname}
def strip_node(self, node):
@@ -300,6 +330,7 @@ class RenamedResource(ConfluentMessage):
class RenamedNode(ConfluentMessage):
def __init__(self, name, rename):
self.myargs = (name, rename)
self.desc = 'New Name'
kv = {'rename': {'value': rename}}
self.kvpairs = {name: kv}
@@ -310,13 +341,16 @@ class AssignedResource(ConfluentMessage):
readonly = True
def __init__(self, resource):
self.myargs = [resource]
self.kvpairs = {'assigned': resource}
class ConfluentChoiceMessage(ConfluentMessage):
valid_values = set()
valid_paramset = {}
def __init__(self, node, state):
self.myargs = (node, state)
self.stripped = False
self.kvpairs = {
node: {
@@ -338,6 +372,7 @@ class ConfluentChoiceMessage(ConfluentMessage):
snippet = ''
for key in pairdata:
val = pairdata[key]
key = util.stringify(key)
snippet += key + ':<select name="%s">' % key
valid_values = self.valid_values
if key in self.valid_paramset:
@@ -389,6 +424,7 @@ class LinkRelation(ConfluentMessage):
class ChildCollection(LinkRelation):
def __init__(self, collname, candelete=False):
self.myargs = (collname, candelete)
self.rel = 'item'
self.href = collname
self.candelete = candelete
@@ -509,9 +545,21 @@ class InputFirmwareUpdate(ConfluentMessage):
raise Exception('User requested substitutions, but code is '
'written against old api, code must be fixed or '
'skip {} expansion')
if self.filebynode[node].startswith('/etc/confluent'):
raise Exception(
'File transfer with /etc/confluent is not supported')
if self.filebynode[node].startswith('/var/log/confluent'):
raise Exception(
'File transfer with /var/log/confluent is not supported')
return self._filename
def nodefile(self, node):
if self.filebynode[node].startswith('/etc/confluent'):
raise Exception(
'File transfer with /etc/confluent is not supported')
if self.filebynode[node].startswith('/var/log/confluent'):
raise Exception(
'File transfer with /var/log/confluent is not supported')
return self.filebynode[node]
class InputMedia(InputFirmwareUpdate):
@@ -530,11 +578,15 @@ class DetachMedia(ConfluentMessage):
class Media(ConfluentMessage):
def __init__(self, node, media):
self.kvpairs = {node: {'name': media.name, 'url': media.url}}
def __init__(self, node, media=None, rawmedia=None):
if media:
rawmedia = {'name': media.name, 'url': media.url}
self.myargs = (node, None, rawmedia)
self.kvpairs = {node: rawmedia}
class SavedFile(ConfluentMessage):
def __init__(self, node, file):
self.myargs = (node, file)
self.kvpairs = {node: {'filename': file}}
class InputAlertData(ConfluentMessage):
@@ -1106,6 +1158,7 @@ class BootDevice(ConfluentChoiceMessage):
}
def __init__(self, node, device, bootmode='unspecified', persistent=False):
self.myargs = (node, device, bootmode, persistent)
if device not in self.valid_values:
raise Exception("Invalid boot device argument passed in:" +
repr(device))
@@ -1204,10 +1257,10 @@ class PowerState(ConfluentChoiceMessage):
def __init__(self, node, state, oldstate=None):
super(PowerState, self).__init__(node, state)
self.myargs = (node, state, oldstate)
if oldstate is not None:
self.kvpairs[node]['oldstate'] = {'value': oldstate}
class BMCReset(ConfluentChoiceMessage):
valid_values = set([
'reset',
@@ -1223,13 +1276,13 @@ class NTPEnabled(ConfluentChoiceMessage):
def __init__(self, node, enabled):
self.stripped = False
self.myargs = (node, enabled)
self.kvpairs = {
node: {
'state': {'value': str(enabled)},
}
}
class EventCollection(ConfluentMessage):
"""A collection of events
@@ -1249,6 +1302,8 @@ class EventCollection(ConfluentMessage):
def __init__(self, events=(), name=None):
eventdata = []
self.notnode = name is None
self.myname = name
self.myargs = (eventdata, name)
for event in events:
entry = {
'id': event.get('id', None),
@@ -1276,6 +1331,10 @@ class AsyncCompletion(ConfluentMessage):
self.stripped = True
self.notnode = True
@classmethod
def deserialize(cls):
raise Exception("Not supported")
def raw(self):
return {'_requestdone': True}
@@ -1286,6 +1345,10 @@ class AsyncMessage(ConfluentMessage):
self.notnode = True
self.msgpair = pair
@classmethod
def deserialize(cls):
raise Exception("Not supported")
def raw(self):
rsp = self.msgpair[1]
rspdict = None
@@ -1317,6 +1380,7 @@ class User(ConfluentMessage):
self.desc = 'foo'
self.stripped = False
self.notnode = name is None
self.myargs = (uid, username, privilege_level, name, expiration)
kvpairs = {'username': {'value': username},
'password': {'value': '', 'type': 'password'},
'privilege_level': {'value': privilege_level},
@@ -1336,7 +1400,11 @@ class UserCollection(ConfluentMessage):
self.notnode = name is None
self.desc = 'list of users'
userlist = []
self.myargs = (userlist, name)
for user in users:
if 'username' in user: # processing an already translated dict
userlist.append(user)
continue
entry = {
'uid': user['uid'],
'username': user['name'],
@@ -1350,8 +1418,10 @@ class UserCollection(ConfluentMessage):
self.kvpairs = {name: {'users': userlist}}
class AlertDestination(ConfluentMessage):
def __init__(self, ip, acknowledge=False, acknowledge_timeout=None, retries=0, name=None):
self.myargs = (ip, acknowledge, acknowledge_timeout, retries, name)
self.desc = 'foo'
self.stripped = False
self.notnode = name is None
@@ -1377,6 +1447,8 @@ class InputAlertDestination(ConfluentMessage):
self.alertcfg = {}
if multinode: # keys are node names
for node in inputdata:
if not isinstance(inputdata[node], dict):
break
self.alertcfg[node] = inputdata[node]
for key in inputdata[node]:
if key not in self.valid_alert_params:
@@ -1389,7 +1461,8 @@ class InputAlertDestination(ConfluentMessage):
else:
self.alertcfg[node][key] = \
self.valid_alert_params[key](inputdata[node][key])
else:
else:
return
for key in inputdata:
if key not in self.valid_alert_params:
raise exc.InvalidArgumentException(
@@ -1413,7 +1486,11 @@ class SensorReadings(ConfluentMessage):
def __init__(self, sensors=(), name=None):
readings = []
self.notnode = name is None
self.myargs = (readings, name)
for sensor in sensors:
if isinstance(sensor, dict):
readings.append(sensor)
continue
sensordict = {'name': sensor.name}
if hasattr(sensor, 'value'):
sensordict['value'] = sensor.value
@@ -1438,6 +1515,13 @@ class Firmware(ConfluentMessage):
readonly = True
def __init__(self, data, name):
for datum in data:
for component in datum:
for field in datum[component]:
tdatum = datum[component]
if isinstance(tdatum[field], datetime):
tdatum[field] = tdatum[field].strftime('%Y-%m-%dT%H:%M:%S')
self.myargs = (data, name)
self.notnode = name is None
self.desc = 'Firmware information'
if self.notnode:
@@ -1450,6 +1534,7 @@ class KeyValueData(ConfluentMessage):
readonly = True
def __init__(self, kvdata, name=None):
self.myargs = (kvdata, name)
self.notnode = name is None
if self.notnode:
self.kvpairs = kvdata
@@ -1459,6 +1544,7 @@ class KeyValueData(ConfluentMessage):
class Array(ConfluentMessage):
def __init__(self, name, disks=None, raid=None, volumes=None,
id=None, capacity=None, available=None):
self.myargs = (name, disks, raid, volumes, id, capacity, available)
self.kvpairs = {
name: {
'type': 'array',
@@ -1473,6 +1559,7 @@ class Array(ConfluentMessage):
class Volume(ConfluentMessage):
def __init__(self, name, volname, size, state, array, stripsize=None):
self.myargs = (name, volname, size, state, array, stripsize)
self.kvpairs = {
name: {
'type': 'volume',
@@ -1487,12 +1574,15 @@ class Volume(ConfluentMessage):
class Disk(ConfluentMessage):
valid_states = set([
'fault',
'jbod',
'unconfigured',
'hotspare',
'rebuilding',
'online',
])
state_aliases = {
'unconfigured bad': 'fault',
'unconfigured good': 'unconfigured',
'global hot spare': 'hotspare',
'dedicated hot spare': 'hotspare',
@@ -1510,6 +1600,8 @@ class Disk(ConfluentMessage):
def __init__(self, name, label=None, description=None,
diskid=None, state=None, serial=None, fru=None,
array=None):
self.myargs = (name, label, description, diskid, state,
serial, fru, array)
state = self._normalize_state(state)
self.kvpairs = {
name: {
@@ -1531,6 +1623,7 @@ class LEDStatus(ConfluentMessage):
readonly = True
def __init__(self, data, name):
self.myargs = (data, name)
self.notnode = name is None
self.desc = 'led status'
@@ -1545,6 +1638,7 @@ class NetworkConfiguration(ConfluentMessage):
def __init__(self, name=None, ipv4addr=None, ipv4gateway=None,
ipv4cfgmethod=None, hwaddr=None):
self.myargs = (name, ipv4addr, ipv4gateway, ipv4cfgmethod, hwaddr)
self.notnode = name is None
self.stripped = False
@@ -1565,6 +1659,7 @@ class HealthSummary(ConfluentMessage):
valid_values = valid_health_values
def __init__(self, health, name=None):
self.myargs = (health, name)
self.stripped = False
self.notnode = name is None
if health not in self.valid_values:
@@ -1577,6 +1672,7 @@ class HealthSummary(ConfluentMessage):
class Attributes(ConfluentMessage):
def __init__(self, name=None, kv=None, desc=''):
self.myargs = (name, kv, desc)
self.desc = desc
nkv = {}
self.notnode = name is None
@@ -1597,6 +1693,7 @@ class ConfigSet(Attributes):
class ListAttributes(ConfluentMessage):
def __init__(self, name=None, kv=None, desc=''):
self.myargs = (name, kv, desc)
self.desc = desc
self.notnode = name is None
if self.notnode:
@@ -1607,6 +1704,7 @@ class ListAttributes(ConfluentMessage):
class MCI(ConfluentMessage):
def __init__(self, name=None, mci=None):
self.myargs = (name, mci)
self.notnode = name is None
self.desc = 'BMC identifier'
@@ -1619,6 +1717,7 @@ class MCI(ConfluentMessage):
class Hostname(ConfluentMessage):
def __init__(self, name=None, hostname=None):
self.myargs = (name, hostname)
self.notnode = name is None
self.desc = 'BMC hostname'
@@ -1630,6 +1729,7 @@ class Hostname(ConfluentMessage):
class DomainName(ConfluentMessage):
def __init__(self, name=None, dn=None):
self.myargs = (name, dn)
self.notnode = name is None
self.desc = 'BMC domain name'
@@ -1644,6 +1744,7 @@ class NTPServers(ConfluentMessage):
readonly = True
def __init__(self, name=None, servers=None):
self.myargs = (name, servers)
self.notnode = name is None
self.desc = 'NTP Server'
@@ -1658,6 +1759,7 @@ class NTPServers(ConfluentMessage):
class NTPServer(ConfluentMessage):
def __init__(self, name=None, server=None):
self.myargs = (name, server)
self.notnode = name is None
self.desc = 'NTP Server'
@@ -1674,6 +1776,7 @@ class License(ConfluentMessage):
readonly = True
def __init__(self, name=None, kvm=None, feature=None, state=None):
self.myargs = (name, kvm, feature, state)
self.notnode = name is None
self.desc = 'License'
@@ -1689,6 +1792,7 @@ class CryptedAttributes(Attributes):
defaulttype = 'password'
def __init__(self, name=None, kv=None, desc=''):
self.myargs = (name, kv, desc)
# for now, just keep the dictionary keys and discard crypt value
self.desc = desc
nkv = {}

View File

@@ -44,7 +44,7 @@ import eventlet
from eventlet.greenpool import GreenPool
import eventlet.semaphore
import re
webclient = eventlet.import_patched('pyghmi.util.webclient')
# The interesting OIDs are:
# lldpLocChassisId - to cross reference (1.0.8802.1.1.2.1.3.2.0)
# lldpLocPortId - for cross referencing.. (1.0.8802.1.1.2.1.3.7.1.3)
@@ -85,6 +85,7 @@ _neighdata = {}
_neighbypeerid = {}
_updatelocks = {}
_chassisidbyswitch = {}
_noaffluent = set([])
def lenovoname(idx, desc):
if desc.isdigit():
@@ -125,7 +126,8 @@ def _dump_neighbordatum(info):
def b64tohex(b64str):
bd = base64.b64decode(b64str)
return ''.join(['{0:02x}'.format(ord(x)) for x in bd])
bd = bytearray(bd)
return ''.join(['{0:02x}'.format(x) for x in bd])
def get_fingerprint(switch, port, configmanager, portmatch):
update_switch_data(switch, configmanager)
@@ -170,21 +172,58 @@ def _init_lldp(data, iname, idx, idxtoportid, switch):
data[iname] = {'port': iname, 'portid': str(idxtoportid[idx]),
'chassisid': _chassisidbyswitch[switch]}
def _extract_neighbor_data_affluent(switch, user, password, cfm, lldpdata):
    """Gather LLDP neighbor data from a switch running the affluent agent.

    Talks to the switch's TLS-pinned HTTPS API instead of SNMP, filling
    *lldpdata* (keyed by local port) and the module-level caches
    _neighbypeerid, _chassisidbyswitch and _neighdata.  Raises on any
    connection/parse failure so the caller can fall back to SNMP.
    """
    kv = util.TLSCertVerifier(cfm, switch,
                              'pubkeys.tls_hardwaremanager').verify_cert
    wc = webclient.SecureHTTPConnection(
        switch, 443, verifycallback=kv, timeout=5)
    wc.set_basic_credentials(user, password)
    neighdata = wc.grab_json_response('/affluent/lldp/all')
    chassisid = neighdata['chassis']['id']
    # Store the bare chassis id string; a stray trailing comma previously
    # made this a one-element tuple, unlike the SNMP path which stores a
    # plain value.
    _chassisidbyswitch[switch] = chassisid
    for record in neighdata['neighbors']:
        localport = record['localport']
        # peerid mirrors the SNMP path: chassis.port with separators
        # normalized so it is usable as a path component
        peerid = '{0}.{1}'.format(
            record.get('peerchassisid', '').replace(':', '-').replace('/', '-'),
            record.get('peerportid', '').replace(':', '-').replace('/', '-'),
        )
        portdata = {
            'verified': True,  # It is over TLS after all
            'peerdescription': record.get('peerdescription', None),
            'peerchassisid': record['peerchassisid'],
            'peername': record['peername'],
            'switch': switch,
            'chassisid': chassisid,
            'portid': record['localport'],
            'peerportid': record['peerportid'],
            'port': record['localport'],
            'peerid': peerid,
        }
        _neighbypeerid[peerid] = portdata
        lldpdata[localport] = portdata
    # Persist into the module-level cache; this previously assigned into
    # the local JSON response dict ("neighdata"), silently discarding the
    # scan results.
    _neighdata[switch] = lldpdata
def _extract_neighbor_data_b(args):
"""Build LLDP data about elements connected to switch
args are carried as a tuple, because of eventlet convenience
"""
switch, password, user, force = args[:4]
switch, password, user, cfm, force = args[:5]
vintage = _neighdata.get(switch, {}).get('!!vintage', 0)
now = util.monotonic_time()
if vintage > (now - 60) and not force:
return
lldpdata = {'!!vintage': now}
try:
return _extract_neighbor_data_affluent(switch, user, password, cfm, lldpdata)
except Exception:
pass
conn = snmp.Session(switch, password, user)
sid = None
lldpdata = {'!!vintage': now}
for sysid in conn.walk('1.3.6.1.2.1.1.2'):
sid = str(sysid[1][6:])
_noaffluent.add(switch)
idxtoifname = {}
idxtoportid = {}
_chassisidbyswitch[switch] = sanitize(list(
@@ -267,8 +306,8 @@ def _extract_neighbor_data(args):
return _extract_neighbor_data_b(args)
except Exception as e:
yieldexc = False
if len(args) >= 5:
yieldexc = args[4]
if len(args) >= 6:
yieldexc = args[5]
if yieldexc:
return e
else:
@@ -357,10 +396,3 @@ def _handle_neighbor_query(pathcomponents, configmanager):
raise x
return list_info(parms, listrequested)
def _list_interfaces(switchname, configmanager):
switchcreds = get_switchcreds(configmanager, (switchname,))
switchcreds = switchcreds[0]
conn = snmp.Session(*switchcreds)
ifnames = netutil.get_portnamemap(conn)
return util.natural_sort(ifnames.values())

View File

@@ -45,13 +45,16 @@ from eventlet.greenpool import GreenPool
import eventlet
import eventlet.semaphore
import re
webclient = eventlet.import_patched('pyghmi.util.webclient')
noaffluent = set([])
_macmap = {}
_apimacmap = {}
_macsbyswitch = {}
_nodesbymac = {}
_switchportmap = {}
_neighdata = {}
vintage = None
@@ -127,6 +130,36 @@ def _nodelookup(switch, ifname):
return None
def _affluent_map_switch(args):
    """Populate MAC-to-port caches from a switch running the affluent agent.

    args is (switch, password, user, cfm); queries the switch's HTTPS API
    (certificate pinned via pubkeys.tls_hardwaremanager) instead of SNMP,
    then merges every discovered MAC into the module-level lookup tables
    _macsbyswitch, _macmap and _nodesbymac.  Raises on failure so the
    caller can fall back to the SNMP path.
    """
    switch, password, user, cfm = args
    kv = util.TLSCertVerifier(cfm, switch,
                              'pubkeys.tls_hardwaremanager').verify_cert
    wc = webclient.SecureHTTPConnection(
        switch, 443, verifycallback=kv, timeout=5)
    wc.set_basic_credentials(user, password)
    # assumes the agent returns {ifname: [mac, ...]} — TODO confirm schema
    macs = wc.grab_json_response('/affluent/macs/by-port')
    _macsbyswitch[switch] = macs
    for iface in macs:
        # number of macs on the port is kept as a tiebreaker hint for
        # consumers choosing between candidate ports
        nummacs = len(macs[iface])
        for mac in macs[iface]:
            if mac in _macmap:
                _macmap[mac].append((switch, iface, nummacs))
            else:
                _macmap[mac] = [(switch, iface, nummacs)]
            nodename = _nodelookup(switch, iface)
            if nodename is not None:
                if mac in _nodesbymac and _nodesbymac[mac][0] != nodename:
                    # For example, listed on both a real edge port
                    # and by accident a trunk port
                    log.log({'error': '{0} and {1} described by ambiguous'
                                      ' switch topology values'.format(
                                          nodename, _nodesbymac[mac][0])})
                    # poison the entry so neither node claims this MAC
                    _nodesbymac[mac] = (None, None)
                else:
                    _nodesbymac[mac] = (nodename, nummacs)
def _map_switch_backend(args):
"""Manipulate portions of mac address map relevant to a given switch
"""
@@ -144,13 +177,18 @@ def _map_switch_backend(args):
# fallback if ifName is empty
#
global _macmap
if len(args) == 3:
switch, password, user = args
if len(args) == 4:
switch, password, user, cfm = args
if not user:
user = None
else:
switch, password = args
user = None
if switch not in noaffluent:
try:
return _affluent_map_switch(args)
except Exception:
pass
haveqbridge = False
mactobridge = {}
conn = snmp.Session(switch, password, user)
@@ -164,6 +202,7 @@ def _map_switch_backend(args):
*([int(x) for x in oid[-6:]])
)
mactobridge[macaddr] = int(bridgeport)
noaffluent.add(switch)
if not haveqbridge:
for vb in conn.walk('1.3.6.1.2.1.17.4.3.1.2'):
oid, bridgeport = vb

View File

@@ -36,7 +36,7 @@ def get_switchcreds(configmanager, switches):
'secret.hardwaremanagementuser', {}).get('value', None)
if not user:
user = None
switchauth.append((switch, password, user))
switchauth.append((switch, password, user, configmanager))
return switchauth

View File

@@ -0,0 +1,160 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2015-2019 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This plugin provides an ssh implementation comforming to the 'console'
# specification. consoleserver or shellserver would be equally likely
# to use this.
import confluent.exceptions as cexc
import confluent.interface.console as conapi
import confluent.log as log
import confluent.util as util
import pyghmi.exceptions as pygexc
import pyghmi.redfish.command as rcmd
import eventlet
import eventlet.green.ssl as ssl
try:
websocket = eventlet.import_patched('websocket')
wso = websocket.WebSocket
except Exception:
wso = object
def get_conn_params(node, configdata):
    """Assemble BMC connection parameters for a node.

    Reads the relevant attributes out of *configdata*, falling back to
    the stock IMM credentials when none are configured and to the node
    name itself when no explicit manager address is set.
    """
    params = {
        'username': 'USERID',
        'passphrase': 'PASSW0RD',  # for lack of a better guess
        'bmc': node,
    }
    if 'secret.hardwaremanagementuser' in configdata:
        params['username'] = configdata['secret.hardwaremanagementuser']['value']
    if 'secret.hardwaremanagementpassword' in configdata:
        params['passphrase'] = configdata['secret.hardwaremanagementpassword']['value']
    if 'hardwaremanagement.manager' in configdata:
        params['bmc'] = configdata['hardwaremanagement.manager']['value']
    return params
_configattributes = ('secret.hardwaremanagementuser',
'secret.hardwaremanagementpassword',
'hardwaremanagement.manager')
class WrappedWebSocket(wso):
    """websocket.WebSocket subclass with known-hosts style TLS pinning.

    CA-based verification is disabled; instead the raw DER peer
    certificate is checked against a caller-supplied callback (fed by
    pubkeys.tls_hardwaremanager).
    """

    def set_verify_callback(self, callback):
        # callback takes the DER certificate bytes, returns truthy if ok
        self._certverify = callback

    def connect(self, url, **options):
        add_tls = url.startswith('wss://')
        if add_tls:
            hostname, port, resource, _ = websocket._url.parse_url(url)
            if hostname[0] != '[' and ':' in hostname:
                # bracket a bare IPv6 literal for URL reassembly
                hostname = '[{0}]'.format(hostname)
            if resource[0] != '/':
                resource = '/{0}'.format(resource)
            # Downgrade the scheme so the parent logic will not start TLS
            # itself; the socket is wrapped manually below.
            # NOTE(review): the parsed port is discarded and 443 is
            # hard-coded — confirm non-443 wss targets are never needed.
            url = 'ws://{0}:443{1}'.format(hostname,resource)
        else:
            # plain ws:// goes through the stock implementation untouched
            return super(WrappedWebSocket, self).connect(url, **options)
        self.sock_opt.timeout = options.get('timeout', self.sock_opt.timeout)
        self.sock, addrs = websocket._http.connect(url, self.sock_opt, websocket._http.proxy_info(**options),
                                                   options.pop('socket', None))
        self.sock = ssl.wrap_socket(self.sock, cert_reqs=ssl.CERT_NONE)
        # The above CERT_NONE is superseded by _certverify, which provides
        # known-hosts style cert validation
        bincert = self.sock.getpeercert(binary_form=True)
        if not self._certverify(bincert):
            raise pygexc.UnrecognizedCertificate('Unknown certificate', bincert)
        try:
            self.handshake_response = websocket._handshake.handshake(self.sock, *addrs, **options)
            if self.handshake_response.status in websocket._handshake.SUPPORTED_REDIRECT_STATUSES:
                # follow redirects ourselves, with a bounded depth
                options['redirect_limit'] = options.pop('redirect_limit', 3) - 1
                if options['redirect_limit'] < 0:
                    raise Exception('Redirect limit hit')
                url = self.handshake_response.headers['location']
                self.sock.close()
                return self.connect(url, **options)
            self.connected = True
        except:
            # ensure the raw socket is not leaked on handshake failure
            if self.sock:
                self.sock.close()
                self.sock = None
            raise
class TsmConsole(conapi.Console):
    """Serial-over-LAN console to a TSM BMC over its websocket interface."""

    def __init__(self, node, config):
        self.node = node
        self.ws = None
        # decrypted BMC address and credentials from the config store
        configdata = config.get_node_attributes([node], _configattributes, decrypt=True)
        connparams = get_conn_params(node, configdata[node])
        self.username = connparams['username']
        self.password = connparams['passphrase']
        self.bmc = connparams['bmc']
        self.origbmc = connparams['bmc']
        if ':' in self.bmc:
            # bracket IPv6 literals for use inside URLs
            self.bmc = '[{0}]'.format(self.bmc)
        self.datacallback = None
        self.nodeconfig = config
        self.connected = False

    def recvdata(self):
        # Pump websocket data up to the console framework until closed.
        while self.connected:
            pendingdata = self.ws.recv()
            if pendingdata == '':
                # an empty read means the remote end closed the session
                self.datacallback(conapi.ConsoleEvent.Disconnect)
                return
            self.datacallback(pendingdata)

    def connect(self, callback):
        """Open the SOL websocket, authenticating via a redfish session."""
        self.datacallback = callback
        # Log in over redfish first to obtain the session cookie and CSRF
        # token the websocket endpoint requires.
        # NOTE(review): cert verification is bypassed for this login; the
        # websocket below is pinned via TLSCertVerifier — confirm intended.
        rc = rcmd.Command(self.origbmc, self.username,
                          self.password,
                          verifycallback=lambda x: True)
        wc = rc.oem.wc
        bmc = self.bmc
        if '%' in self.bmc:
            # strip an IPv6 zone index for the Host header value
            prefix = self.bmc.split('%')[0]
            bmc = prefix + ']'
        self.ws = WrappedWebSocket(host=bmc)
        kv = util.TLSCertVerifier(
            self.nodeconfig, self.node, 'pubkeys.tls_hardwaremanager').verify_cert
        self.ws.set_verify_callback(kv)
        self.ws.connect('wss://{0}/sol?CSRFTOKEN={1}'.format(self.bmc, rc.oem.csrftok), host=bmc, cookie='QSESSIONID={0}'.format(wc.cookies['QSESSIONID']))
        self.connected = True
        # background greenthread feeds received data to the callback
        eventlet.spawn_n(self.recvdata)
        return

    def write(self, data):
        # forward keystrokes to the BMC
        self.ws.send(data)

    def close(self):
        if self.ws:
            self.ws.close()
        self.connected = False
        self.datacallback = None
def create(nodes, element, configmanager, inputdata):
    """Console plugin entry point: build a console for a single node.

    Console sessions are inherently per-node; any other node count
    produces no console (None).
    """
    if len(nodes) != 1:
        return None
    return TsmConsole(nodes[0], configmanager)

View File

@@ -0,0 +1,153 @@
# Copyright 2019-2020 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
import eventlet.queue as queue
import confluent.exceptions as exc
webclient = eventlet.import_patched('pyghmi.util.webclient')
import confluent.messages as msg
import confluent.util as util
class SwitchSensor(object):
    """Plain container for one switch sensor reading.

    Mirrors the attribute set consumers expect from pyghmi sensor
    objects: name, states, value, and health.
    """

    def __init__(self, name, states, value=None, health=None):
        self.name = name
        self.states = states
        self.value = value
        self.health = health
class WebClient(object):
    """HTTPS helper bound to one node, with pinned-certificate checking."""

    def __init__(self, node, configmanager, creds):
        self.node = node
        # certificate verified against pubkeys.tls_hardwaremanager
        self.wc = webclient.SecureHTTPConnection(node, port=443, verifycallback=util.TLSCertVerifier(
            configmanager, node, 'pubkeys.tls_hardwaremanager').verify_cert)
        self.wc.set_basic_credentials(creds[node]['secret.hardwaremanagementuser']['value'], creds[node]['secret.hardwaremanagementpassword']['value'])

    def fetch(self, url, results):
        """GET *url* as JSON; on error, report to *results* and return {}."""
        rsp, status = self.wc.grab_json_response_with_status(url)
        if status == 401:
            results.put(msg.ConfluentTargetInvalidCredentials(self.node, 'Unable to authenticate'))
            return {}
        elif status != 200:
            # NOTE(review): rsp is concatenated as a str here — confirm
            # grab_json_response_with_status never yields a dict/bytes
            # body on error statuses
            results.put(msg.ConfluentNodeError(self.node, 'Unknown error: ' + rsp + ' while retrieving ' + url))
            return {}
        return rsp
def update(nodes, element, configmanager, inputdata):
    """Updates are not supported by this plugin; report per node."""
    for unsupported in nodes:
        yield msg.ConfluentNodeError(unsupported, 'Not Implemented')
def delete(nodes, element, configmanager, inputdata):
    """Deletes are not supported by this plugin; report per node."""
    for unsupported in nodes:
        yield msg.ConfluentNodeError(unsupported, 'Not Implemented')
def create(nodes, element, configmanager, inputdata):
    """Creates are not supported by this plugin; report per node."""
    for unsupported in nodes:
        yield msg.ConfluentNodeError(unsupported, 'Not Implemented')
def _run_method(method, workers, results, configmanager, nodes, element):
    """Spawn one greenthread per node running method, tracked in workers.

    Credentials for all nodes are fetched (decrypted) up front and shared
    with every worker.
    """
    creds = configmanager.get_node_attributes(
        nodes,
        ['secret.hardwaremanagementuser',
         'secret.hardwaremanagementpassword'],
        decrypt=True)
    for target in nodes:
        workers.add(eventlet.spawn(
            method, configmanager, creds, target, results, element))
def retrieve(nodes, element, configmanager, inputdata):
    """Dispatch a read request for nodes to the matching handler.

    For supported elements, one worker greenthread per node is spawned and
    result messages are yielded as the workers produce them.  Unsupported
    elements yield a per-node 'Not Implemented' error.
    """
    results = queue.LightQueue()
    workers = set([])
    if element == ['power', 'state']:
        # Switches that respond at all are treated as powered on.
        for node in nodes:
            yield msg.PowerState(node=node, state='on')
        return
    elif element == ['health', 'hardware']:
        _run_method(retrieve_health, workers, results, configmanager, nodes, element)
    elif element[:3] == ['inventory', 'hardware', 'all']:
        _run_method(retrieve_inventory, workers, results, configmanager, nodes, element)
    elif element[:3] == ['inventory', 'firmware', 'all']:
        _run_method(retrieve_firmware, workers, results, configmanager, nodes, element)
    elif element == ['sensors', 'hardware', 'all']:
        # Exact collection path: list sensor names only.
        _run_method(list_sensors, workers, results, configmanager, nodes, element)
    elif element[:3] == ['sensors', 'hardware', 'all']:
        # Longer path: fetch readings for one specific sensor.
        _run_method(retrieve_sensors, workers, results, configmanager, nodes, element)
    else:
        for node in nodes:
            yield msg.ConfluentNodeError(node, 'Not Implemented')
        return
    while workers:
        try:
            # Wait up to ten seconds for the first datum, then drain the
            # queue without blocking.  NOTE: the original called
            # results.get(10), which bound 10 to LightQueue.get's 'block'
            # parameter (truthy) and waited with NO timeout; timeout=10 is
            # what the surrounding except queue.Empty intends.
            datum = results.get(timeout=10)
            while datum:
                yield datum
                datum = results.get_nowait()
        except queue.Empty:
            pass
        eventlet.sleep(0.001)
        # Reap workers that have finished so the loop can terminate.
        for t in list(workers):
            if t.dead:
                workers.discard(t)
    try:
        # Drain anything produced after the last worker was reaped.
        while True:
            datum = results.get_nowait()
            if datum:
                yield datum
    except queue.Empty:
        pass
def retrieve_inventory(configmanager, creds, node, results, element):
    """Enumerate or fetch hardware inventory data for node."""
    if len(element) == 3:
        # Bare 'inventory/hardware' collection: list child resources only.
        results.put(msg.ChildCollection('all'))
        results.put(msg.ChildCollection('system'))
        return
    invinfo = WebClient(node, configmanager, creds).fetch(
        '/affluent/inventory/hardware/all', results)
    if invinfo:
        results.put(msg.KeyValueData(invinfo, node))
def retrieve_firmware(configmanager, creds, node, results, element):
    """Enumerate or fetch firmware level information for node."""
    if len(element) == 3:
        # Bare collection path: list the lone child resource.
        results.put(msg.ChildCollection('all'))
        return
    fwinfo = WebClient(node, configmanager, creds).fetch(
        '/affluent/inventory/firmware/all', results)
    if fwinfo:
        results.put(msg.Firmware(fwinfo, node))
def list_sensors(configmanager, creds, node, results, element):
    """List the names of the hardware sensors available on node."""
    wc = WebClient(node, configmanager, creds)
    sensordata = wc.fetch('/affluent/sensors/hardware/all', results)
    for sensorname in sensordata['item']:
        results.put(msg.ChildCollection(sensorname))
def retrieve_sensors(configmanager, creds, node, results, element):
    """Fetch readings for the single sensor named by element[-1]."""
    url = '/affluent/sensors/hardware/all/{0}'.format(element[-1])
    readings = WebClient(node, configmanager, creds).fetch(url, results)
    if readings:
        results.put(msg.SensorReadings(readings['sensors'], node))
def retrieve_health(configmanager, creds, node, results, element):
    """Fetch the overall health summary plus associated sensor readings."""
    hinfo = WebClient(node, configmanager, creds).fetch(
        '/affluent/health', results)
    if not hinfo:
        return
    results.put(msg.HealthSummary(hinfo.get('health', 'unknown'), name=node))
    results.put(msg.SensorReadings(hinfo.get('sensors', []), name=node))

View File

@@ -78,6 +78,18 @@ def retrieve(nodes, element, configmanager, inputdata):
for node in nodes:
workers.add(eventlet.spawn(retrieve_health, configmanager, creds,
node, results))
elif element[:3] == ['inventory', 'hardware', 'all']:
creds = configmanager.get_node_attributes(
nodes, ['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword'], decrypt=True)
for node in nodes:
workers.add(eventlet.spawn(retrieve_inventory, configmanager,
creds, node, results, element))
elif element[:3] == ['inventory', 'firmware', 'all']:
creds = configmanager.get_node_attributes(
nodes, ['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword'], decrypt=True)
for node in nodes:
workers.add(eventlet.spawn(retrieve_firmware, configmanager,
creds, node, results, element))
else:
for node in nodes:
yield msg.ConfluentNodeError(node, 'Not Implemented')
@@ -105,6 +117,44 @@ def retrieve(nodes, element, configmanager, inputdata):
pass
def retrieve_inventory(configmanager, creds, node, results, element):
    # Report CNOS switch inventory.  For the bare collection path (three
    # path components), just list the child resources without logging in.
    if len(element) == 3:
        results.put(msg.ChildCollection('all'))
        results.put(msg.ChildCollection('system'))
        return
    wc = cnos_login(node, configmanager, creds)
    sysinfo = wc.grab_json_response('/nos/api/sysinfo/inventory')
    # Normalize the CNOS sysinfo payload into confluent's inventory schema.
    # NOTE(review): CNOS reports its chassis serial as 'Electronic Serial
    # Number' and the board serial as 'Serial Number' per this mapping —
    # confirm against the CNOS REST API documentation.
    invinfo = {
        'inventory': [{
            'name': 'System',
            'present': True,
            'information': {
                'Product name': sysinfo['Model'],
                'Serial Number': sysinfo['Electronic Serial Number'],
                'Board Serial Number': sysinfo['Serial Number'],
                'Manufacturer': 'Lenovo',
                'Model': sysinfo['Machine Type Model'],
                'FRU Number': sysinfo['FRU'].strip(),
            }
        }]
    }
    results.put(msg.KeyValueData(invinfo, node))
def retrieve_firmware(configmanager, creds, node, results, element):
    # Report CNOS firmware levels.  For the bare collection path, list the
    # single child resource without contacting the switch.
    if len(element) == 3:
        results.put(msg.ChildCollection('all'))
        return
    wc = cnos_login(node, configmanager, creds)
    sysinfo = wc.grab_json_response('/nos/api/sysinfo/inventory')
    # Present the NOS software revision and BIOS revision as two firmware
    # components in confluent's expected list-of-dicts shape.
    items = [{
        'Software': {'version': sysinfo['Software Revision']},
    },
    {
        'BIOS': {'version': sysinfo['BIOS Revision']},
    }]
    results.put(msg.Firmware(items, node))
def retrieve_health(configmanager, creds, node, results):
wc = cnos_login(node, configmanager, creds)
hinfo = wc.grab_json_response('/nos/api/sysinfo/globalhealthstatus')

View File

@@ -42,6 +42,11 @@ import traceback
if not hasattr(ssl, 'SSLEOFError'):
ssl.SSLEOFError = None
try:
range = xrange
except NameError:
pass
pci_cache = {}
def get_dns_txt(qstring):
@@ -131,7 +136,7 @@ def hex2bin(hexstring):
if len(hexvals) < 2:
hexvals = hexstring.split(' ')
if len(hexvals) < 2:
hexvals = [hexstring[i:i+2] for i in xrange(0, len(hexstring), 2)]
hexvals = [hexstring[i:i+2] for i in range(0, len(hexstring), 2)]
bytedata = [int(i, 16) for i in hexvals]
return bytearray(bytedata)
@@ -278,6 +283,7 @@ def _donothing(data):
class IpmiConsole(conapi.Console):
configattributes = frozenset(_configattributes)
bmctonodemapping = {}
def __init__(self, node, config):
self.error = None
@@ -295,10 +301,21 @@ class IpmiConsole(conapi.Console):
self.bmc = connparams['bmc']
self.port = connparams['port']
self.connected = False
# ok, is self.bmc unique among nodes already
# Cannot actually create console until 'connect', when we get callback
if (self.bmc in self.bmctonodemapping and
self.bmctonodemapping[self.bmc] != node):
raise Exception(
"Duplicate hardwaremanagement.manager attribute for {0} and {1}".format(
node, self.bmctonodemapping[self.bmc]))
self.bmctonodemapping[self.bmc] = node
def __del__(self):
self.solconnection = None
try:
del self.bmctonodemapping[self.bmc]
except KeyError:
pass
def handle_data(self, data):
if type(data) == dict:
@@ -584,9 +601,13 @@ class IpmiHandler(object):
self.output.put(msg.CreatedResource(
'nodes/{0}/media/uploads/{1}'.format(self.node, u.name)))
def get_diags(self, savefile, progress):
return self.ipmicmd.get_diagnostic_data(
savefile, progress=progress, autosuffix=True)
def handle_servicedata_fetch(self):
u = firmwaremanager.Updater(
self.node, self.ipmicmd.get_diagnostic_data,
self.node, self.get_diags,
self.inputdata.nodefile(self.node), self.tenant, type='ffdc',
owner=self.current_user)
self.output.put(msg.CreatedResource(
@@ -625,8 +646,10 @@ class IpmiHandler(object):
return self.handle_ntp()
elif self.element[1:4] == ['management_controller', 'extended', 'all']:
return self.handle_bmcconfig()
elif self.element[1:4] == ['management_controller', 'extended', 'all']:
elif self.element[1:4] == ['management_controller', 'extended', 'advanced']:
return self.handle_bmcconfig(True)
elif self.element[1:4] == ['management_controller', 'extended', 'extra']:
return self.handle_bmcconfig(True, extended=True)
elif self.element[1:3] == ['system', 'all']:
return self.handle_sysconfig()
elif self.element[1:3] == ['system', 'advanced']:
@@ -658,7 +681,7 @@ class IpmiHandler(object):
if len(self.element) == 4:
# A list of destinations
maxdest = self.ipmicmd.get_alert_destination_count()
for alertidx in xrange(0, maxdest + 1):
for alertidx in range(0, maxdest + 1):
self.output.put(msg.ChildCollection(alertidx))
return
elif len(self.element) == 5:
@@ -1391,12 +1414,14 @@ class IpmiHandler(object):
'Cannot read the "clear" resource')
self.ipmicmd.clear_system_configuration()
def handle_bmcconfig(self, advanced=False):
def handle_bmcconfig(self, advanced=False, extended=False):
if 'read' == self.op:
try:
self.output.put(msg.ConfigSet(
self.node,
self.ipmicmd.get_bmc_configuration()))
if extended:
bmccfg = self.ipmicmd.get_extended_bmc_configuration()
else:
bmccfg = self.ipmicmd.get_bmc_configuration()
self.output.put(msg.ConfigSet(self.node, bmccfg))
except Exception as e:
self.output.put(
msg.ConfluentNodeError(self.node, str(e)))
@@ -1469,6 +1494,11 @@ class IpmiHandler(object):
def save_licenses(self):
directory = self.inputdata.nodefile(self.node)
if not os.access(os.path.dirname(directory), os.W_OK):
raise exc.InvalidArgumentException(
'The confluent system user/group is unable to write to '
'directory {0}, check ownership and permissions'.format(
os.path.dirname(directory)))
for saved in self.ipmicmd.save_licenses(directory):
self.output.put(msg.SavedFile(self.node, saved))
@@ -1476,7 +1506,15 @@ class IpmiHandler(object):
if self.element[-1] == '':
self.element = self.element[:-1]
if self.op in ('create', 'update'):
self.ipmicmd.apply_license(self.inputdata.nodefile(self.node))
filename = self.inputdata.nodefile(self.node)
if not os.access(filename, os.R_OK):
errstr = ('{0} is not readable by confluent on {1} '
'(ensure confluent user or group can access file '
'and parent directories)').format(
filename, socket.gethostname())
self.output.put(msg.ConfluentNodeError(self.node, errstr))
return
self.ipmicmd.apply_license(filename)
if len(self.element) == 3:
self.output.put(msg.ChildCollection('all'))
i = 1

View File

@@ -175,12 +175,15 @@ class IpmiCommandWrapper(ipmicommand.Command):
raise exc.TargetEndpointUnreachable(se.strerror)
else:
raise exc.TargetEndpointUnreachable(str(se))
if isinstance(se, socket.timeout) or (len(se) > 1 and se[1] == 'EHOSTUNREACH'):
if isinstance(se, socket.timeout):
raise exc.TargetEndpointUnreachable('timeout')
raise
except pygexc.PyghmiException as pe:
if 'Access Denied' in str(pe):
raise exc.TargetEndpointBadCredentials()
if 'Redfish not ready' in str(pe):
raise exc.TargetEndpointUnreachable('Redfish not yet ready')
raise
def close_confluent(self):
if self._attribwatcher:
@@ -462,9 +465,13 @@ class IpmiHandler(object):
self.output.put(msg.CreatedResource(
'nodes/{0}/media/uploads/{1}'.format(self.node, u.name)))
def get_diags(self, savefile, progress):
return self.ipmicmd.get_diagnostic_data(
savefile, progress=progress, autosuffix=True)
def handle_servicedata_fetch(self):
u = firmwaremanager.Updater(
self.node, self.ipmicmd.get_diagnostic_data,
self.node, self.get_diags,
self.inputdata.nodefile(self.node), self.tenant, type='ffdc',
owner=self.current_user)
self.output.put(msg.CreatedResource(
@@ -1347,6 +1354,14 @@ class IpmiHandler(object):
if self.element[-1] == '':
self.element = self.element[:-1]
if self.op in ('create', 'update'):
filename = self.inputdata.nodefile(self.node)
if not os.access(filename, os.R_OK):
errstr = ('{0} is not readable by confluent on {1} '
'(ensure confluent user or group can access file '
'and parent directories)').format(
filename, socket.gethostname())
self.output.put(msg.ConfluentNodeError(self.node, errstr))
return
self.ipmicmd.apply_license(self.inputdata.nodefile(self.node))
if len(self.element) == 3:
self.output.put(msg.ChildCollection('all'))

View File

@@ -123,8 +123,8 @@ def sessionhdl(connection, authname, skipauth=False, cert=None):
if authdata:
cfm = authdata[1]
authenticated = True
# version 0 == original, version 1 == pickle3 allowed
send_data(connection, "Confluent -- v{0} --".format(sys.version_info[0] - 2))
# version 0 == original, version 1 == pickle3 allowed, 2 = pickle forbidden, msgpack allowed
send_data(connection, "Confluent -- v2 --")
while not authenticated: # prompt for name and passphrase
send_data(connection, {'authpassed': 0})
response = tlvdata.recv(connection)
@@ -154,20 +154,25 @@ def sessionhdl(connection, authname, skipauth=False, cert=None):
cfm = authdata[1]
send_data(connection, {'authpassed': 1})
request = tlvdata.recv(connection)
if request and 'collective' in request and skipauth:
if not libssl:
if request and 'collective' in request:
if skipauth:
if not libssl:
tlvdata.send(
connection,
{'collective': {'error': 'Server either does not have '
'python-pyopenssl installed or has an '
'incorrect version installed '
'(e.g. pyOpenSSL would need to be '
'replaced with python-pyopenssl). '
'Restart confluent after updating '
'the dependency.'}})
return
return collective.handle_connection(connection, None, request['collective'],
local=True)
else:
tlvdata.send(
connection,
{'collective': {'error': 'Server either does not have '
'python-pyopenssl installed or has an '
'incorrect version installed '
'(e.g. pyOpenSSL would need to be '
'replaced with python-pyopenssl). '
'Restart confluent after updating '
'the dependency.'}})
return
return collective.handle_connection(connection, None, request['collective'],
local=True)
connection,
{'collective': {'error': 'collective management commands may only be used by root'}})
while request is not None:
try:
process_request(

View File

@@ -13,9 +13,9 @@ BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
Prefix: %{_prefix}
BuildArch: noarch
%if "%{dist}" == ".el8"
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-pyte, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-monotonic
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-pyte, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-monotonic, python3-websocket-client python3-msgpack
%else
Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client, python-pyparsing, python-paramiko, python-dns, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-pyte, python-lxml, python-eficompressor, python-setuptools, python-dateutil
Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client, python-pyparsing, python-paramiko, python-dns, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-pyte, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python2-websocket-client python2-msgpack
%endif
Vendor: Jarrod Johnson <jjohnson2@lenovo.com>
Url: http://xcat.sf.net/
@@ -42,16 +42,41 @@ python2 setup.py install --single-version-externally-managed -O1 --root=$RPM_BUI
for file in $(grep confluent/__init__.py INSTALLED_FILES.bare); do
rm $RPM_BUILD_ROOT/$file
done
grep -v confluent/__init__.py INSTALLED_FILES.bare > INSTALLED_FILES
grep -v confluent/__init__.py INSTALLED_FILES.bare | grep -v etc/init.d/confluent > INSTALLED_FILES
rm $RPM_BUILD_ROOT/etc/init.d/confluent
rmdir $RPM_BUILD_ROOT/etc/init.d
rmdir $RPM_BUILD_ROOT/etc
# Only do non-root confluent if systemd of the platform supports it
systemd-analyze verify $RPM_BUILD_ROOT/usr/lib/systemd/system/confluent.service 2>&1 | grep "'AmbientCapabilities'" > /dev/null && sed -e 's/User=.*//' -e 's/Group=.*//' -e 's/AmbientCapabilities=.*//' -i $RPM_BUILD_ROOT/usr/lib/systemd/system/confluent.service
cat INSTALLED_FILES
%triggerin -- python-pyghmi
%triggerin -- python-pyghmi, python3-pyghmi, python2-pyghmi
if [ -x /usr/bin/systemctl ]; then /usr/bin/systemctl try-restart confluent >& /dev/null; fi
true
%pre
getent group confluent > /dev/null || /usr/sbin/groupadd -r confluent
getent passwd confluent > /dev/null || /usr/sbin/useradd -r -g confluent -d /var/lib/confluent -s /sbin/nologin confluent
mkdir -p /etc/confluent /var/lib/confluent /var/log/confluent /var/cache/confluent
chown -R confluent:confluent /etc/confluent /var/lib/confluent /var/log/confluent /var/cache/confluent
%post
sysctl -p /usr/lib/sysctl.d/confluent.conf >& /dev/null
if [ -x /usr/bin/systemctl ]; then /usr/bin/systemctl try-restart confluent >& /dev/null; fi
NEEDCHOWN=0
NEEDSTART=0
find /etc/confluent -uid 0 | egrep '.*' > /dev/null && NEEDCHOWN=1
find /var/log/confluent -uid 0 | egrep '.*' > /dev/null && NEEDCHOWN=1
find /var/run/confluent -uid 0 | egrep '.*' > /dev/null && NEEDCHOWN=1
find /var/cache/confluent -uid 0 | egrep '.*' > /dev/null && NEEDCHOWN=1
if [ $NEEDCHOWN = 1 ]; then
if systemctl is-active confluent > /dev/null; then
NEEDSTART=1
systemctl stop confluent
fi
chown -R confluent:confluent /etc/confluent /var/lib/confluent /var/log/confluent /var/cache/confluent
fi
systemctl daemon-reload
if systemctl is-active confluent > /dev/null || [ $NEEDSTART = 1 ]; then /usr/bin/systemctl restart confluent >& /dev/null; fi
if [ ! -e /etc/pam.d/confluent ]; then
ln -s /etc/pam.d/sshd /etc/pam.d/confluent
fi

View File

@@ -14,6 +14,7 @@ setup(
'confluent/discovery/handlers/',
'confluent/networking/',
'confluent/plugins/hardwaremanagement/',
'confluent/plugins/console/',
'confluent/plugins/shell/',
'confluent/collective/',
'confluent/plugins/configuration/'],

View File

@@ -1,13 +1,25 @@
# IBM(c) 2015 Apache 2.0
# Lenovo(c) 2020 Apache 2.0
[Unit]
Description=Confluent hardware manager
Description=Confluent hardware manager
[Service]
Type=forking
#PIDFile=/var/run/confluent/pid
RuntimeDirectory=confluent
StateDirectory=confluent
CacheDirectory=confluent
LogsDirectory=confluent
ConfigurationDirectory=confluent
ExecStart=/opt/confluent/bin/confluent
ExecStop=/opt/confluent/bin/confetty shutdown /
Restart=on-failure
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_CHOWN
User=confluent
Group=confluent
DevicePolicy=closed
ProtectControlGroups=true
ProtectSystem=true
[Install]
WantedBy=multi-user.target