mirror of
https://github.com/xcat2/confluent.git
synced 2026-05-16 19:34:19 +00:00
Merge branch 'master' into remote_discovery
This commit is contained in:
@@ -33,7 +33,7 @@ import confluent.client as client
|
||||
|
||||
argparser = optparse.OptionParser(
|
||||
usage="Usage: %prog [options] <noderange> "
|
||||
"([status|on|off|shutdown|boot|reset])")
|
||||
"([status|on|off|shutdown|boot|reset|pdu_status|pdu_off|pdu_on])")
|
||||
argparser.add_option('-p', '--showprevious', dest='previous',
|
||||
action='store_true', default=False,
|
||||
help='Show previous power state')
|
||||
@@ -51,22 +51,27 @@ except IndexError:
|
||||
client.check_globbing(noderange)
|
||||
setstate = None
|
||||
if len(args) > 1:
|
||||
setstate = args[1]
|
||||
if setstate == 'softoff':
|
||||
setstate = 'shutdown'
|
||||
elif not args[1] in ('stat', 'state', 'status'):
|
||||
setstate = args[1]
|
||||
|
||||
if setstate not in (None, 'on', 'off', 'shutdown', 'boot', 'reset'):
|
||||
if setstate not in (None, 'on', 'off', 'shutdown', 'boot', 'reset', 'pdu_status', 'pdu_stat', 'pdu_on', 'pdu_off', 'status', 'stat', 'state'):
|
||||
argparser.print_help()
|
||||
sys.exit(1)
|
||||
session = client.Command()
|
||||
exitcode = 0
|
||||
session.add_precede_key('oldstate')
|
||||
powurl = 'state'
|
||||
if setstate and setstate.startswith('pdu_'):
|
||||
setstate = setstate.replace('pdu_', '')
|
||||
powurl = 'inlets/all'
|
||||
if setstate in ('status', 'state', 'stat'):
|
||||
setstate = None
|
||||
|
||||
if options.previous:
|
||||
# get previous states
|
||||
prev = {}
|
||||
for rsp in session.read("/noderange/{0}/power/state".format(noderange)):
|
||||
for rsp in session.read("/noderange/{0}/power/{1}".format(noderange, powurl)):
|
||||
# gets previous (current) states
|
||||
|
||||
databynode = rsp["databynode"]
|
||||
@@ -77,4 +82,7 @@ if options.previous:
|
||||
# add dictionary to session
|
||||
session.add_precede_dict(prev)
|
||||
|
||||
sys.exit(session.simple_noderange_command(noderange, '/power/state', setstate, promptover=options.maxnodes))
|
||||
def outhandler(node, res):
|
||||
for k in res[node]:
|
||||
client.cprint('{0}: {1}: {2}'.format(node, k.replace('inlet_', ''), res[node][k]))
|
||||
sys.exit(session.simple_noderange_command(noderange, '/power/{0}'.format(powurl), setstate, promptover=options.maxnodes, key='state', outhandler=outhandler))
|
||||
|
||||
@@ -208,7 +208,7 @@ class Command(object):
|
||||
def add_precede_dict(self, dict):
|
||||
self._prevdict = dict
|
||||
|
||||
def handle_results(self, ikey, rc, res, errnodes=None):
|
||||
def handle_results(self, ikey, rc, res, errnodes=None, outhandler=None):
|
||||
if 'error' in res:
|
||||
if errnodes is not None:
|
||||
errnodes.add(self._currnoderange)
|
||||
@@ -245,10 +245,12 @@ class Command(object):
|
||||
node, val, self._prevdict[node]))
|
||||
else:
|
||||
cprint('{0}: {1}'.format(node, val))
|
||||
elif outhandler:
|
||||
outhandler(node, res)
|
||||
return rc
|
||||
|
||||
def simple_noderange_command(self, noderange, resource, input=None,
|
||||
key=None, errnodes=None, promptover=None, **kwargs):
|
||||
key=None, errnodes=None, promptover=None, outhandler=None, **kwargs):
|
||||
try:
|
||||
self._currnoderange = noderange
|
||||
rc = 0
|
||||
@@ -262,13 +264,13 @@ class Command(object):
|
||||
if input is None:
|
||||
for res in self.read('/noderange/{0}/{1}'.format(
|
||||
noderange, resource)):
|
||||
rc = self.handle_results(ikey, rc, res, errnodes)
|
||||
rc = self.handle_results(ikey, rc, res, errnodes, outhandler)
|
||||
else:
|
||||
self.stop_if_noderange_over(noderange, promptover)
|
||||
kwargs[ikey] = input
|
||||
for res in self.update('/noderange/{0}/{1}'.format(
|
||||
noderange, resource), kwargs):
|
||||
rc = self.handle_results(ikey, rc, res, errnodes)
|
||||
rc = self.handle_results(ikey, rc, res, errnodes, outhandler)
|
||||
self._currnoderange = None
|
||||
return rc
|
||||
except KeyboardInterrupt:
|
||||
|
||||
@@ -0,0 +1,38 @@
|
||||
nodeping(8) -- Pings a node or a noderange.
|
||||
==============================
|
||||
## SYNOPSIS
|
||||
`nodeping [options] noderange`
|
||||
|
||||
## DESCRIPTION
|
||||
**nodeping** is a command that pings the default NIC on a node.
|
||||
It can also be used with the `-s` flag to change the ping location to something that is 'non primary'
|
||||
|
||||
|
||||
## OPTIONS
|
||||
* ` -f` COUNT, `-c` COUNT, --count=COUNT
|
||||
Number of commands to run at a time
|
||||
* `-h`, `--help`:
|
||||
Show help message and exit
|
||||
* `-s` SUBSTITUTENAME, --substitutename=SUBSTITUTENAME
|
||||
Use a different name other than the nodename for ping
|
||||
|
||||
## EXAMPLES
|
||||
* Pinging a node :
|
||||
`# nodeping <node>`
|
||||
`node : ping`
|
||||
|
||||
* Pinging a group:
|
||||
`# nodeping <groupname>`
|
||||
`Node1 : ping
|
||||
Node2 : ping
|
||||
Node3 : ping`
|
||||
|
||||
* Pinging BMC on a node:
|
||||
`# nodeping -s {bmc} <noderange>`
|
||||
` Node-bmc : ping`
|
||||
|
||||
* Fail to ping node:
|
||||
`# nodeping <node>`
|
||||
`node : no_ping`
|
||||
|
||||
|
||||
@@ -24,6 +24,9 @@ respond.
|
||||
* `reset`: Request immediate reset of nodes of the noderange. Nodes that are
|
||||
off will not react to this request.
|
||||
* `status`: Behave identically to having no argument passed at all.
|
||||
* `pdu_status`: Query state of associated PDU outlets, if configured.
|
||||
* `pdu_on`: Energize all PDU outlets associated with the noderange.
|
||||
* `pdu_off`: De-energize all PDU outlets associated with the noderange.
|
||||
|
||||
## OPTIONS
|
||||
|
||||
|
||||
@@ -43,6 +43,20 @@ mkdir -p /etc/pki/tls/certs
|
||||
cat /tls/*.pem > /etc/pki/tls/certs/ca-bundle.crt
|
||||
TRIES=0
|
||||
touch /etc/confluent/confluent.info
|
||||
if [ -e /dev/disk/by-label/CNFLNT_IDNT ]; then
|
||||
mkdir -p /media/ident
|
||||
mount /dev/disk/by-label/CNFLNT_IDNT /media/ident
|
||||
if [ -e /media/ident/genesis_bootstrap.sh ]; then
|
||||
exec sh /media/ident/genesis_bootstrap.sh
|
||||
fi
|
||||
fi
|
||||
if [ -e /dev/disk/by-label/GENESIS-X86 ]; then
|
||||
mkdir -p /media/genesis
|
||||
mount /dev/disk/by-label/GENESIS-X86 /media/genesis
|
||||
if [ -e /media/genesis/genesis_bootstrap.sh ]; then
|
||||
exec sh /media/genesis/genesis_bootstrap.sh
|
||||
fi
|
||||
fi
|
||||
cd /sys/class/net
|
||||
echo -n "Scanning for network configuration..."
|
||||
while ! grep ^EXTMGRINFO: /etc/confluent/confluent.info | awk -F'|' '{print $3}' | grep 1 >& /dev/null && [ "$TRIES" -lt 30 ]; do
|
||||
@@ -141,6 +155,23 @@ elif [ "$autoconfigmethod" = "static" ]; then
|
||||
ip route add default via $v4gw
|
||||
fi
|
||||
fi
|
||||
nameserversec=0
|
||||
while read -r entry; do
|
||||
if [ $nameserversec = 1 ]; then
|
||||
if [[ $entry == "-"* ]] && [[ $entry != "- ''" ]]; then
|
||||
echo nameserver ${entry#- } >> /etc/resolv.conf
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
nameserversec=0
|
||||
if [ "${entry%:*}" = "nameservers" ]; then
|
||||
nameserversec=1
|
||||
continue
|
||||
fi
|
||||
done < /etc/confluent/confluent.deploycfg
|
||||
dnsdomain=$(grep ^dnsdomain: /etc/confluent/confluent.deploycfg)
|
||||
dnsdomain=${dnsdomain#dnsdomain: }
|
||||
echo search $dnsdomain >> /etc/resolv.conf
|
||||
echo -n "Initializing ssh..."
|
||||
ssh-keygen -A
|
||||
for pubkey in /etc/ssh/ssh_host*key.pub; do
|
||||
|
||||
@@ -22,6 +22,8 @@ import confluent.util as util
|
||||
import confluent.client as client
|
||||
import confluent.sshutil as sshutil
|
||||
import confluent.certutil as certutil
|
||||
import confluent.netutil as netutil
|
||||
import socket
|
||||
try:
|
||||
input = raw_input
|
||||
except NameError:
|
||||
@@ -149,10 +151,13 @@ def local_node_trust_setup():
|
||||
neededlines = set([
|
||||
'HostbasedAuthentication yes', 'HostbasedUsesNameFromPacketOnly yes',
|
||||
'IgnoreRhosts no'])
|
||||
if domain and not myname.endswith(domain):
|
||||
myprincipals.add('{0}.{1}'.format(myname, domain))
|
||||
if domain and '.' in myname and myname.endswith(domain):
|
||||
myprincipals.add(myname.split('.')[0])
|
||||
myshortname = myname.split('.')[0]
|
||||
myprincipals.add(myshortname)
|
||||
if domain:
|
||||
myprincipals.add('{0}.{1}'.format(myshortname, domain))
|
||||
for addr in netutil.get_my_addresses():
|
||||
addr = socket.inet_ntop(addr[0], addr[1])
|
||||
myprincipals.add(addr)
|
||||
for pubkey in glob.glob('/etc/ssh/ssh_host_*_key.pub'):
|
||||
currpubkey = open(pubkey, 'rb').read()
|
||||
cert = sshutil.sign_host_key(currpubkey, myname, myprincipals)
|
||||
|
||||
@@ -162,6 +162,10 @@ def authorize(name, element, tenant=False, operation='create',
|
||||
return False
|
||||
manager = configmanager.ConfigManager(tenant, username=user)
|
||||
userobj = manager.get_user(user)
|
||||
if element and (element.startswith('/sessions/current/webauthn/registered_credentials/') or element.startswith('/sessions/current/webauthn/validate/')):
|
||||
return userobj, manager, user, tenant, skipuserobj
|
||||
if userobj and userobj.get('role', None) == 'Stub':
|
||||
userobj = None
|
||||
if not userobj:
|
||||
for group in userutil.grouplist(user):
|
||||
userobj = manager.get_usergroup(group)
|
||||
|
||||
@@ -534,6 +534,12 @@ node = {
|
||||
'To support this scenario, the switch should be set up to allow independent operation of member ports123654 (e.g. lacp bypass mode or fallback mode).',
|
||||
'validvalues': ('lacp', 'loadbalance', 'roundrobin', 'activebackup', 'none')
|
||||
},
|
||||
'power.pdu': {
|
||||
'description': 'Specifies the managed PDU associated with a power input on the node'
|
||||
},
|
||||
'power.outlet': {
|
||||
'description': 'Species the outlet identifier on the PDU associoted with a power input on the node'
|
||||
},
|
||||
# 'id.modelnumber': {
|
||||
# 'description': 'The manufacturer dictated model number for the node',
|
||||
# },
|
||||
|
||||
@@ -113,7 +113,7 @@ _attraliases = {
|
||||
'bmcpass': 'secret.hardwaremanagementpassword',
|
||||
'switchpass': 'secret.hardwaremanagementpassword',
|
||||
}
|
||||
_validroles = ('Administrator', 'Operator', 'Monitor')
|
||||
_validroles = ('Administrator', 'Operator', 'Monitor', 'Stub')
|
||||
|
||||
membership_callback = None
|
||||
|
||||
@@ -485,7 +485,7 @@ def attribute_is_invalid(attrname, attrval):
|
||||
|
||||
|
||||
def _get_valid_attrname(attrname):
|
||||
if attrname.startswith('net.'):
|
||||
if attrname.startswith('net.') or attrname.startswith('power.'):
|
||||
# For net.* attribtues, split on the dots and put back together
|
||||
# longer term we might want a generic approach, but
|
||||
# right now it's just net. attributes
|
||||
@@ -2447,10 +2447,10 @@ class ConfigManager(object):
|
||||
uid = tmpconfig[confarea].get('id', None)
|
||||
displayname = tmpconfig[confarea].get('displayname', None)
|
||||
self.create_user(user, uid=uid, displayname=displayname)
|
||||
if 'cryptpass' in tmpconfig[confarea][user]:
|
||||
self._cfgstore['users'][user]['cryptpass'] = \
|
||||
tmpconfig[confarea][user]['cryptpass']
|
||||
_mark_dirtykey('users', user, self.tenant)
|
||||
for attrname in ('authid', 'authenticators', 'cryptpass'):
|
||||
if attrname in tmpconfig[confarea][user]:
|
||||
self._cfgstore['users'][user][attrname] = tmpconfig[confarea][user][attrname]
|
||||
_mark_dirtykey('users', user, self.tenant)
|
||||
if sync:
|
||||
self._bg_sync_to_file()
|
||||
|
||||
@@ -2548,8 +2548,13 @@ class ConfigManager(object):
|
||||
if statelessmode:
|
||||
return
|
||||
with cls._syncstate:
|
||||
if (cls._syncrunning and cls._cfgwriter is not None and
|
||||
cls._cfgwriter.isAlive()):
|
||||
isalive = False
|
||||
if cls._cfgwriter is not None:
|
||||
try:
|
||||
isalive = cls._cfgwriter.isAlive()
|
||||
except AttributeError:
|
||||
isalive = cls._cfgwriter.is_alive()
|
||||
if (cls._syncrunning and isalive):
|
||||
cls._writepending = True
|
||||
return
|
||||
if cls._syncrunning: # This suggests an unclean write attempt,
|
||||
@@ -2777,8 +2782,8 @@ def dump_db_to_directory(location, password, redact=None, skipkeys=False):
|
||||
cfgfile.write('\n')
|
||||
bkupglobals = get_globals()
|
||||
if bkupglobals:
|
||||
json.dump(bkupglobals, open(os.path.join(location, 'globals.json'),
|
||||
'w'))
|
||||
with open(os.path.join(location, 'globals.json'), 'w') as globout:
|
||||
json.dump(bkupglobals, globout)
|
||||
try:
|
||||
for tenant in os.listdir(
|
||||
os.path.join(ConfigManager._cfgdir, '/tenants/')):
|
||||
|
||||
@@ -457,6 +457,8 @@ def _init_core():
|
||||
'pluginattrs': ['hardwaremanagement.method'],
|
||||
'default': 'ipmi',
|
||||
}),
|
||||
'inlets': PluginCollection({'handler': 'pdu'}),
|
||||
'outlets': PluginCollection({'pluginattrs': ['hardwaremanagement.method']}),
|
||||
'reseat': PluginRoute({'handler': 'enclosure'}),
|
||||
},
|
||||
'sensors': {
|
||||
|
||||
@@ -393,7 +393,7 @@ def snoop(handler, protocol=None, nodeguess=None):
|
||||
if level == socket.IPPROTO_IP and typ == IP_PKTINFO:
|
||||
idx, recv = struct.unpack('II', cmsgarr[16:24])
|
||||
recv = ipfromint(recv)
|
||||
rqv = memoryview(rawbuffer)
|
||||
rqv = memoryview(rawbuffer)[:i]
|
||||
if rawbuffer[0] == 1: # Boot request
|
||||
process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv)
|
||||
elif netc == net6:
|
||||
|
||||
@@ -281,6 +281,8 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
|
||||
def _get_svrip(peerdata):
|
||||
for addr in peerdata['addresses']:
|
||||
if addr[0].startswith('fe80::'):
|
||||
if '%' not in addr[0]:
|
||||
return addr[0] + '%{0}'.format(addr[3])
|
||||
return addr[0]
|
||||
return peerdata['addresses'][0][0]
|
||||
|
||||
|
||||
@@ -21,6 +21,10 @@ try:
|
||||
import Cookie
|
||||
except ModuleNotFoundError:
|
||||
import http.cookies as Cookie
|
||||
try:
|
||||
import confluent.webauthn as webauthn
|
||||
except ImportError:
|
||||
webauthn = None
|
||||
import confluent.auth as auth
|
||||
import confluent.config.attributes as attribs
|
||||
import confluent.consoleserver as consoleserver
|
||||
@@ -207,6 +211,8 @@ def _should_skip_authlog(env):
|
||||
if '/sessions/current/async' in env['PATH_INFO']:
|
||||
# this is effectively invisible
|
||||
return True
|
||||
if '/sessions/current/webauthn/registered_credentials' in env['PATH_INFO']:
|
||||
return True
|
||||
if (env['REQUEST_METHOD'] == 'GET' and
|
||||
('/sensors/' in env['PATH_INFO'] or
|
||||
'/health/' in env['PATH_INFO'] or
|
||||
@@ -263,18 +269,24 @@ def _csrf_valid(env, session):
|
||||
env['HTTP_CONFLUENTAUTHTOKEN'] == session['csrftoken'])
|
||||
|
||||
|
||||
def _authorize_request(env, operation):
|
||||
def _authorize_request(env, operation, reqbody):
|
||||
"""Grant/Deny access based on data from wsgi env
|
||||
|
||||
"""
|
||||
authdata = None
|
||||
name = ''
|
||||
sessionid = None
|
||||
sessid = None
|
||||
cookie = Cookie.SimpleCookie()
|
||||
element = env['PATH_INFO']
|
||||
if element.startswith('/sessions/current/'):
|
||||
element = None
|
||||
if 'HTTP_COOKIE' in env:
|
||||
if (element.startswith('/sessions/current/webauthn/registered_credentials/')
|
||||
or element.startswith('/sessions/current/webauthn/validate/')):
|
||||
name = element.rsplit('/')[-1]
|
||||
authdata = auth.authorize(name, element=element, operation=operation)
|
||||
else:
|
||||
element = None
|
||||
if (not authdata) and 'HTTP_COOKIE' in env:
|
||||
cidx = (env['HTTP_COOKIE']).find('confluentsessionid=')
|
||||
if cidx >= 0:
|
||||
sessionid = env['HTTP_COOKIE'][cidx+19:cidx+51]
|
||||
@@ -322,18 +334,13 @@ def _authorize_request(env, operation):
|
||||
return {'code': 403}
|
||||
elif not authdata:
|
||||
return {'code': 401}
|
||||
sessid = util.randomstring(32)
|
||||
while sessid in httpsessions:
|
||||
sessid = util.randomstring(32)
|
||||
httpsessions[sessid] = {'name': name, 'expiry': time.time() + 90,
|
||||
'skipuserobject': authdata[4],
|
||||
'inflight': set([])}
|
||||
if 'HTTP_CONFLUENTAUTHTOKEN' in env:
|
||||
httpsessions[sessid]['csrftoken'] = util.randomstring(32)
|
||||
cookie['confluentsessionid'] = util.stringify(sessid)
|
||||
cookie['confluentsessionid']['secure'] = 1
|
||||
cookie['confluentsessionid']['httponly'] = 1
|
||||
cookie['confluentsessionid']['path'] = '/'
|
||||
sessid = _establish_http_session(env, authdata, name, cookie)
|
||||
if authdata and element and element.startswith('/sessions/current/webauthn/validate/'):
|
||||
if webauthn:
|
||||
for rsp in webauthn.handle_api_request(element, env, None, authdata[2], authdata[1], None, reqbody, None):
|
||||
if rsp['verified']:
|
||||
sessid = _establish_http_session(env, authdata, name, cookie)
|
||||
break
|
||||
skiplog = _should_skip_authlog(env)
|
||||
if authdata:
|
||||
auditmsg = {
|
||||
@@ -352,17 +359,32 @@ def _authorize_request(env, operation):
|
||||
auditmsg['user'] = util.stringify(authdata[2])
|
||||
if sessid is not None:
|
||||
authinfo['sessionid'] = sessid
|
||||
if 'csrftoken' in httpsessions[sessid]:
|
||||
authinfo['authtoken'] = httpsessions[sessid]['csrftoken']
|
||||
httpsessions[sessid]['cfgmgr'] = authdata[1]
|
||||
if not skiplog:
|
||||
auditlog.log(auditmsg)
|
||||
if 'csrftoken' in httpsessions[sessid]:
|
||||
authinfo['authtoken'] = httpsessions[sessid]['csrftoken']
|
||||
httpsessions[sessid]['cfgmgr'] = authdata[1]
|
||||
return authinfo
|
||||
elif authdata is None:
|
||||
return {'code': 401}
|
||||
else:
|
||||
return {'code': 403}
|
||||
|
||||
def _establish_http_session(env, authdata, name, cookie):
|
||||
sessid = util.randomstring(32)
|
||||
while sessid in httpsessions:
|
||||
sessid = util.randomstring(32)
|
||||
httpsessions[sessid] = {'name': name, 'expiry': time.time() + 90,
|
||||
'skipuserobject': authdata[4],
|
||||
'inflight': set([])}
|
||||
if 'HTTP_CONFLUENTAUTHTOKEN' in env:
|
||||
httpsessions[sessid]['csrftoken'] = util.randomstring(32)
|
||||
cookie['confluentsessionid'] = util.stringify(sessid)
|
||||
cookie['confluentsessionid']['secure'] = 1
|
||||
cookie['confluentsessionid']['httponly'] = 1
|
||||
cookie['confluentsessionid']['path'] = '/'
|
||||
return sessid
|
||||
|
||||
|
||||
def _pick_mimetype(env):
|
||||
"""Detect the http indicated mime to send back.
|
||||
@@ -603,7 +625,7 @@ def resourcehandler_backend(env, start_response):
|
||||
if operation != 'retrieve' and 'restexplorerop' in querydict:
|
||||
operation = querydict['restexplorerop']
|
||||
del querydict['restexplorerop']
|
||||
authorized = _authorize_request(env, operation)
|
||||
authorized = _authorize_request(env, operation, reqbody)
|
||||
if 'logout' in authorized:
|
||||
start_response('200 Successful logout', headers)
|
||||
yield('{"result": "200 - Successful logout"}')
|
||||
@@ -632,7 +654,7 @@ def resourcehandler_backend(env, start_response):
|
||||
raise Exception("Unrecognized code from auth engine")
|
||||
headers.extend(
|
||||
("Set-Cookie", m.OutputString())
|
||||
for m in authorized['cookie'].values())
|
||||
for m in authorized.get('cookie', {}).values())
|
||||
cfgmgr = authorized['cfgmgr']
|
||||
if (operation == 'create') and env['PATH_INFO'] == '/sessions/current/async':
|
||||
pagecontent = ""
|
||||
@@ -830,6 +852,14 @@ def resourcehandler_backend(env, start_response):
|
||||
tlvdata.unicode_dictvalues(sessinfo)
|
||||
yield json.dumps(sessinfo)
|
||||
return
|
||||
elif url.startswith('/sessions/current/webauthn/'):
|
||||
if not webauthn:
|
||||
start_response('501 Not Implemented', headers)
|
||||
yield ''
|
||||
return
|
||||
for rsp in webauthn.handle_api_request(url, env, start_response, authorized['username'], cfgmgr, headers, reqbody, authorized):
|
||||
yield rsp
|
||||
return
|
||||
resource = '.' + url[url.rindex('/'):]
|
||||
lquerydict = copy.deepcopy(querydict)
|
||||
try:
|
||||
|
||||
@@ -515,6 +515,8 @@ def get_input_message(path, operation, inputdata, nodes=None, multinode=False,
|
||||
return InputVolumes(path, nodes, inputdata)
|
||||
elif 'inventory/firmware/updates/active' in '/'.join(path) and inputdata:
|
||||
return InputFirmwareUpdate(path, nodes, inputdata, configmanager)
|
||||
elif ('/'.join(path).startswith('power/inlets/') or '/'.join(path).startswith('power/outlets/')) and inputdata:
|
||||
return InputPowerMessage(path, nodes, inputdata)
|
||||
elif '/'.join(path).startswith('media/detach'):
|
||||
return DetachMedia(path, nodes, inputdata)
|
||||
elif '/'.join(path).startswith('media/') and inputdata:
|
||||
|
||||
@@ -0,0 +1,124 @@
|
||||
# Copyright 2022 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import pyghmi.util.webclient as wc
|
||||
import confluent.util as util
|
||||
import confluent.messages as msg
|
||||
import confluent.exceptions as exc
|
||||
|
||||
|
||||
class GeistClient(object):
|
||||
def __init__(self, pdu, configmanager):
|
||||
self.node = pdu
|
||||
self.configmanager = configmanager
|
||||
self._token = None
|
||||
self._wc = None
|
||||
self.username = None
|
||||
|
||||
@property
|
||||
def token(self):
|
||||
if not self._token:
|
||||
self._token = self.login(self.configmanager)
|
||||
return self._token
|
||||
|
||||
@property
|
||||
def wc(self):
|
||||
if self._wc:
|
||||
return self._wc
|
||||
targcfg = self.configmanager.get_node_attributes(self.node,
|
||||
['hardwaremanagement.manager'],
|
||||
decrypt=True)
|
||||
targcfg = targcfg.get(self.node, {})
|
||||
target = targcfg.get(
|
||||
'hardwaremanagement.manager', {}).get('value', None)
|
||||
if not target:
|
||||
target = self.node
|
||||
cv = util.TLSCertVerifier(
|
||||
self.configmanager, self.node,
|
||||
'pubkeys.tls_hardwaremanager').verify_cert
|
||||
self._wc = wc.SecureHTTPConnection(target, verifycallback=cv)
|
||||
return self._wc
|
||||
|
||||
def login(self, configmanager):
|
||||
credcfg = configmanager.get_node_attributes(self.node,
|
||||
['secret.hardwaremanagementuser',
|
||||
'secret.hardwaremanagementpassword'],
|
||||
decrypt=True)
|
||||
credcfg = credcfg.get(self.node, {})
|
||||
username = credcfg.get(
|
||||
'secret.hardwaremanagementuser', {}).get('value', None)
|
||||
passwd = credcfg.get(
|
||||
'secret.hardwaremanagementpassword', {}).get('value', None)
|
||||
if not isinstance(username, str):
|
||||
username = username.decode('utf8')
|
||||
if not isinstance(passwd, str):
|
||||
passwd = passwd.decode('utf8')
|
||||
if not username or not passwd:
|
||||
raise Exception('Missing username or password')
|
||||
self.username = username
|
||||
rsp = self.wc.grab_json_response(
|
||||
'/api/auth/{0}'.format(username),
|
||||
{'cmd': 'login', 'data': {'password': passwd}})
|
||||
token = rsp['data']['token']
|
||||
return token
|
||||
|
||||
def logout(self):
|
||||
if self._token:
|
||||
self.wc.grab_json_response('/api/auth/{0}'.format(self.username),
|
||||
{'cmd': 'logout', 'token': self.token})
|
||||
self._token = None
|
||||
|
||||
def get_outlet(self, outlet):
|
||||
rsp = self.wc.grab_json_response('/api/dev')
|
||||
rsp = rsp['data']
|
||||
if len(rsp) != 1:
|
||||
raise Exception('Multiple PDUs not supported per pdu')
|
||||
pduname = list(rsp)[0]
|
||||
outlet = rsp[pduname]['outlet'][str(int(outlet) - 1)]
|
||||
state = outlet['state'].split('2')[-1]
|
||||
return state
|
||||
|
||||
def set_outlet(self, outlet, state):
|
||||
rsp = self.wc.grab_json_response('/api/dev')
|
||||
if len(rsp['data']) != 1:
|
||||
self.logout()
|
||||
raise Exception('Multiple PDUs per endpoint not supported')
|
||||
pdu = list(rsp['data'])[0]
|
||||
outlet = int(outlet) - 1
|
||||
rsp = self.wc.grab_json_response(
|
||||
'/api/dev/{0}/outlet/{1}'.format(pdu, outlet),
|
||||
{'cmd': 'control', 'token': self.token,
|
||||
'data': {'action': state, 'delay': False}})
|
||||
|
||||
|
||||
def retrieve(nodes, element, configmanager, inputdata):
|
||||
if 'outlets' not in element:
|
||||
for node in nodes:
|
||||
yield msg.ConfluentResourceUnavailable(node, 'Not implemented')
|
||||
return
|
||||
for node in nodes:
|
||||
gc = GeistClient(node, configmanager)
|
||||
state = gc.get_outlet(element[-1])
|
||||
yield msg.PowerState(node=node, state=state)
|
||||
|
||||
def update(nodes, element, configmanager, inputdata):
|
||||
if 'outlets' not in element:
|
||||
yield msg.ConfluentResourceUnavailable(node, 'Not implemented')
|
||||
return
|
||||
for node in nodes:
|
||||
gc = GeistClient(node, configmanager)
|
||||
newstate = inputdata.powerstate(node)
|
||||
gc.set_outlet(element[-1], newstate)
|
||||
for res in retrieve(nodes, element, configmanager, inputdata):
|
||||
yield res
|
||||
@@ -482,7 +482,7 @@ class IpmiHandler(object):
|
||||
self.tenant = cfg.tenant
|
||||
tenant = cfg.tenant
|
||||
while ((node, tenant) not in persistent_ipmicmds or
|
||||
not persistent_ipmicmds[(node, tenant)].ipmi_session.logged or
|
||||
not (persistent_ipmicmds[(node, tenant)].ipmi_session.logged or persistent_ipmicmds[(node, tenant)].ipmi_session.logging) or
|
||||
persistent_ipmicmds[(node, tenant)].ipmi_session.broken):
|
||||
try:
|
||||
persistent_ipmicmds[(node, tenant)].close_confluent()
|
||||
@@ -514,6 +514,11 @@ class IpmiHandler(object):
|
||||
raise exc.TargetEndpointUnreachable(ge.strerror)
|
||||
raise
|
||||
self.ipmicmd = persistent_ipmicmds[(node, tenant)]
|
||||
giveup = util.monotonic_time() + 60
|
||||
while not self.ipmicmd.ipmi_session.broken and not self.ipmicmd.ipmi_session.logged and self.ipmicmd.ipmi_session.logging:
|
||||
self.ipmicmd.ipmi_session.wait_for_rsp(3)
|
||||
if util.monotonic_time() > giveup:
|
||||
self.ipmicmd.ipmi_session.broken = True
|
||||
|
||||
bootdevices = {
|
||||
'optical': 'cd'
|
||||
|
||||
@@ -0,0 +1,79 @@
|
||||
# Copyright 2017 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import confluent.core as core
|
||||
import confluent.messages as msg
|
||||
import pyghmi.exceptions as pygexc
|
||||
import confluent.exceptions as exc
|
||||
|
||||
def retrieve(nodes, element, configmanager, inputdata):
|
||||
emebs = configmanager.get_node_attributes(
|
||||
nodes, (u'power.*pdu', u'power.*outlet'))
|
||||
if element == ['power', 'inlets']:
|
||||
outletnames = set([])
|
||||
for node in nodes:
|
||||
for attrib in emebs[node]:
|
||||
attrib = attrib.replace('power.', '').rsplit('.', 1)
|
||||
if len(attrib) > 1:
|
||||
outletnames.add('inlet_' + attrib[0])
|
||||
else:
|
||||
outletnames.add('default')
|
||||
if outletnames:
|
||||
outletnames.add('all')
|
||||
for inlet in outletnames:
|
||||
yield msg.ChildCollection(inlet)
|
||||
elif len(element) == 3:
|
||||
inletname = element[-1]
|
||||
outlets = get_outlets(nodes, emebs, inletname)
|
||||
for node in outlets:
|
||||
for pgroup in outlets[node]:
|
||||
pdu = outlets[node][pgroup]['pdu']
|
||||
outlet = outlets[node][pgroup]['outlet']
|
||||
for rsp in core.handle_path(
|
||||
'/nodes/{0}/power/outlets/{1}'.format(pdu, outlet),
|
||||
'retrieve', configmanager):
|
||||
yield msg.KeyValueData({pgroup: rsp.kvpairs['state']['value']}, node)
|
||||
|
||||
def get_outlets(nodes, emebs, inletname):
|
||||
outlets = {}
|
||||
for node in nodes:
|
||||
if node not in outlets:
|
||||
outlets[node] = {}
|
||||
for attrib in emebs[node]:
|
||||
v = emebs[node][attrib].get('value', None)
|
||||
if not v:
|
||||
continue
|
||||
attrib = attrib.replace('power.', '').rsplit('.', 1)
|
||||
if len(attrib) > 1:
|
||||
pgroup = 'inlet_' + attrib[0]
|
||||
else:
|
||||
pgroup = 'default'
|
||||
if inletname == 'all' or pgroup == inletname:
|
||||
if pgroup not in outlets[node]:
|
||||
outlets[node][pgroup] = {}
|
||||
outlets[node][pgroup][attrib[-1]] = v
|
||||
return outlets
|
||||
|
||||
|
||||
def update(nodes, element, configmanager, inputdata):
|
||||
emebs = configmanager.get_node_attributes(
|
||||
nodes, (u'power.*pdu', u'power.*outlet'))
|
||||
inletname = element[-1]
|
||||
outlets = get_outlets(nodes, emebs, inletname)
|
||||
for node in outlets:
|
||||
for pgroup in outlets[node]:
|
||||
pdu = outlets[node][pgroup]['pdu']
|
||||
outlet = outlets[node][pgroup]['outlet']
|
||||
for rsp in core.handle_path('/nodes/{0}/power/outlets/{1}'.format(pdu, outlet),
|
||||
'update', configmanager, inputdata={'state': inputdata.powerstate(node)}):
|
||||
yield msg.KeyValueData({pgroup: rsp.kvpairs['state']['value']}, node)
|
||||
@@ -0,0 +1,128 @@
|
||||
import base64
|
||||
import confluent.tlvdata as tlvdata
|
||||
import confluent.util as util
|
||||
import json
|
||||
import pywarp
|
||||
import pywarp.backends
|
||||
import pywarp.credentials
|
||||
|
||||
challenges = {}
|
||||
|
||||
class ConfluentBackend(pywarp.backends.CredentialStorageBackend):
|
||||
def __init__(self, cfg):
|
||||
self.cfg = cfg
|
||||
|
||||
def get_credential_ids_by_email(self, email):
|
||||
if not isinstance(email, str):
|
||||
email = email.decode('utf8')
|
||||
authenticators = self.cfg.get_user(email).get('authenticators', {})
|
||||
if not authenticators:
|
||||
raise Exception('No authenticators found')
|
||||
for cid in authenticators:
|
||||
yield base64.b64decode(cid)
|
||||
|
||||
def get_credential_by_email_id(self, email, id):
|
||||
if not isinstance(email, str):
|
||||
email = email.decode('utf8')
|
||||
authenticators = self.cfg.get_user(email).get('authenticators', {})
|
||||
cid = base64.b64encode(id).decode('utf8')
|
||||
pk = authenticators[cid]['cpk']
|
||||
pk = base64.b64decode(pk)
|
||||
return pywarp.credentials.Credential(credential_id=id, credential_public_key=pk)
|
||||
|
||||
def get_credential_by_email(self, email):
|
||||
if not isinstance(email, str):
|
||||
email = email.decode('utf8')
|
||||
authenticators = self.cfg.get_user(email)
|
||||
cid = list(authenticators)[0]
|
||||
cred = authenticators[cid]
|
||||
cid = base64.b64decode(cred['cid'])
|
||||
cpk = base64.b64decode(cred['cpk'])
|
||||
return pywarp.credentials.Credential(credential_id=cid, credential_public_key=cpk)
|
||||
|
||||
def save_credential_for_user(self, email, credential):
|
||||
if not isinstance(email, str):
|
||||
email = email.decode('utf8')
|
||||
cid = base64.b64encode(credential.id).decode('utf8')
|
||||
credential = {'cid': cid, 'cpk': base64.b64encode(bytes(credential.public_key)).decode('utf8')}
|
||||
authenticators = self.cfg.get_user(email).get('authenticators', {})
|
||||
authenticators[cid] = credential
|
||||
self.cfg.set_user(email, {'authenticators': authenticators})
|
||||
|
||||
def save_challenge_for_user(self, email, challenge, type):
|
||||
if not isinstance(email, str):
|
||||
email = email.decode('utf8')
|
||||
challenges[email] = challenge
|
||||
|
||||
def get_challenge_for_user(self, email, type):
|
||||
if not isinstance(email, str):
|
||||
email = email.decode('utf8')
|
||||
return challenges[email]
|
||||
|
||||
|
||||
def handle_api_request(url, env, start_response, username, cfm, headers, reqbody, authorized):
    """Dispatch a webauthn (FIDO2) API request for the current session.

    Generator yielding the response body; supported sub-URLs (relative to
    /sessions/current/webauthn) are /registration_options,
    /registered_credentials/<user>, /validate/<user>, /register_credential.
    Only POST is accepted; anything else raises.
    """
    if env['REQUEST_METHOD'] != 'POST':
        raise Exception('Only POST supported for webauthn operations')
    # strip the common prefix so the branches below match the operation only
    url = url.replace('/sessions/current/webauthn', '')
    if url == '/registration_options':
        rp = pywarp.RelyingPartyManager('Confluent Web UI', credential_storage_backend=ConfluentBackend(cfm), require_attestation=False)
        userinfo = cfm.get_user(username)
        if not userinfo:
            # auto-create a placeholder account so registration can proceed
            cfm.create_user(username, role='Stub')
            userinfo = cfm.get_user(username)
        authid = userinfo.get('authid', None)
        if not authid:
            # first registration for this user: mint and persist a random id
            authid = util.randomstring(64)
            cfm.set_user(username, {'authid': authid})
        opts = rp.get_registration_options(username)
        # pywarp generates an id derived
        # from username, which is a 'must not' in the spec
        # we replace that with a complying approach
        opts['user']['id'] = authid
        if 'icon' in opts['user']:
            del opts['user']['icon']
        if 'id' in opts['rp']:
            del opts['rp']['id']
        start_response('200 OK', headers)
        yield json.dumps(opts)
    elif url.startswith('/registered_credentials/'):
        # target user is the last path component, not the session user
        username = url.rsplit('/', 1)[-1]
        rp = pywarp.RelyingPartyManager('Confluent Web UI', credential_storage_backend=ConfluentBackend(cfm))
        if not isinstance(username, bytes):
            username = username.encode('utf8')
        opts = rp.get_authentication_options(username)
        # the raw challenge bytes are not JSON-serializable; send base64 text
        opts['challenge'] = base64.b64encode(opts['challenge']).decode('utf8')
        start_response('200 OK', headers)
        yield json.dumps(opts)
    elif url.startswith('/validate/'):
        username = url.rsplit('/', 1)[-1]
        if not isinstance(username, bytes):
            username = username.encode('utf8')
        rp = pywarp.RelyingPartyManager('Confluent Web UI', credential_storage_backend=ConfluentBackend(cfm))
        req = json.loads(reqbody)
        for x in req:
            # fields arrive base64url-encoded; map to the standard alphabet
            # before decoding to the raw bytes pywarp expects
            req[x] = base64.b64decode(req[x].replace('-', '+').replace('_', '/'))
        req['email'] = username
        rsp = rp.verify(**req)
        if start_response:
            # normal WSGI path: reply with session info for the web UI
            start_response('200 OK', headers)
            sessinfo = {'username': username}
            if 'authtoken' in authorized:
                sessinfo['authtoken'] = authorized['authtoken']
            if 'sessionid' in authorized:
                sessinfo['sessionid'] = authorized['sessionid']
            tlvdata.unicode_dictvalues(sessinfo)
            yield json.dumps(sessinfo)
        else:
            # internal caller (no start_response): hand back pywarp's verdict
            yield rsp
    elif url == '/register_credential':
        rp = pywarp.RelyingPartyManager('Confluent Web UI', credential_storage_backend=ConfluentBackend(cfm), require_attestation=False)
        req = json.loads(reqbody)
        for x in req:
            # same base64url-to-standard-base64 translation as /validate/
            req[x] = base64.b64decode(req[x].replace('-', '+').replace('_', '/'))
        if not isinstance(username, bytes):
            username = username.encode('utf8')
        req['email'] = username
        rsp = rp.register(**req)
        start_response('200 OK', headers)
        yield json.dumps(rsp)
||||
@@ -17,7 +17,7 @@ Requires: confluent_vtbufferd
|
||||
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-monotonic, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
|
||||
%else
|
||||
%if "%{dist}" == ".el9"
|
||||
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-monotonic, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
|
||||
Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
|
||||
%else
|
||||
Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python2-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic
|
||||
%endif
|
||||
|
||||
@@ -10,7 +10,7 @@ dracut_install awk egrep dirname bc expr sort
|
||||
dracut_install ssh sshd vi reboot lspci parted tmux mkfs mkfs.ext4 mkfs.xfs xfs_db mkswap
|
||||
dracut_install efibootmgr
|
||||
dracut_install du df ssh-keygen scp clear dhclient lldpd lldpcli tee
|
||||
dracut_install /lib64/libnss_dns-2.28.so /lib64/libnss_dns.so.2
|
||||
dracut_install /lib64/libnss_dns-2.28.so /lib64/libnss_dns.so.2 /lib64/libnss_myhostname.so.2
|
||||
dracut_install ldd uptime /usr/lib64/libnl-3.so.200
|
||||
dracut_install poweroff date /etc/nsswitch.conf /etc/services /etc/protocols
|
||||
dracut_install /usr/share/terminfo/x/xterm /usr/share/terminfo/l/linux /usr/share/terminfo/v/vt100 /usr/share/terminfo/x/xterm-color /usr/share/terminfo/s/screen /usr/share/terminfo/x/xterm-256color /usr/share/terminfo/p/putty-256color /usr/share/terminfo/p/putty /usr/share/terminfo/d/dumb
|
||||
|
||||
Reference in New Issue
Block a user