Mirror of https://github.com/xcat2/confluent.git, synced 2026-01-11 18:42:29 +00:00
Move XCC detection to SSDP
XCC will be retiring SLP; to prepare for that, move detection over to SSDP. This required wiring the SSDP snoop up to detection correctly, having SLP no longer report XCC, moving model/serial-number extraction into the xcc handler, and various fixes to SSDP.
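For context on the mechanism this commit leans on: SSDP discovery multicasts an HTTP-over-UDP M-SEARCH request to 239.255.255.250:1900 and parses the unicast responses. A minimal standalone sketch of that exchange (the function name and the ssdp:all search target are illustrative, not confluent's API):

import socket

def msearch(target='ssdp:all', timeout=3.0):
    # M-SEARCH is plain HTTP-style text over UDP multicast.
    req = ('M-SEARCH * HTTP/1.1\r\n'
           'HOST: 239.255.255.250:1900\r\n'
           'MAN: "ssdp:discover"\r\n'
           'MX: 3\r\n'
           'ST: {0}\r\n\r\n').format(target).encode('utf8')
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.settimeout(timeout)
    s.sendto(req, ('239.255.255.250', 1900))
    while True:
        try:
            rsp, peer = s.recvfrom(9000)
        except socket.timeout:
            break
        # Each response carries headers such as LOCATION and USN that
        # identify the responding device.
        yield peer, rsp

for peer, rsp in msearch():
    print(peer, rsp.split(b'\r\n', 1)[0])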
@@ -635,18 +635,6 @@ def detected(info):
         eventlet.spawn_after(10, info['protocol'].fix_info, info,
                              safe_detected)
         return
-    try:
-        snum = info['attributes']['enclosure-serial-number'][0].strip()
-        if snum:
-            info['serialnumber'] = snum
-            known_serials[info['serialnumber']] = info
-    except (KeyError, IndexError):
-        pass
-    try:
-        info['modelnumber'] = info['attributes']['enclosure-machinetype-model'][0]
-        known_services[service].add(info['modelnumber'])
-    except (KeyError, IndexError):
-        pass
     if info['hwaddr'] in known_info and 'addresses' in info:
         # we should tee these up for parsing when an enclosure comes up
         # also when switch config parameters change, should discard
@@ -678,6 +666,22 @@ def detected(info):
     if handler:
         handler = handler.NodeHandler(info, cfg)
         handler.scan()
+        try:
+            if 'modelnumber' not in info:
+                info['modelnumber'] = info['attributes']['enclosure-machinetype-model'][0]
+        except (KeyError, IndexError):
+            pass
+        if 'modelnumber' in info:
+            known_services[service].add(info['modelnumber'])
+        try:
+            if 'serialnumber' not in info:
+                snum = info['attributes']['enclosure-serial-number'][0].strip()
+                if snum:
+                    info['serialnumber'] = snum
+        except (KeyError, IndexError):
+            pass
+        if 'serialnumber' in info:
+            known_serials[info['serialnumber']] = info
     uuid = info.get('uuid', None)
     if uuid_is_valid(uuid):
         known_uuids[uuid][info['hwaddr']] = info
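The added block gives the handler's scan() first claim on modelnumber and serialnumber, falling back to the raw SSDP/SLP attributes only when scan() left them unset. A hypothetical condensation of that fill-if-absent pattern (the helper name and sample data are illustrative):

def fill_if_absent(info, key, attrname):
    # Only fall back to the advertised attribute when the handler's scan()
    # did not already populate the field.
    if key in info:
        return
    try:
        val = info['attributes'][attrname][0].strip()
    except (KeyError, IndexError):
        return
    if val:
        info[key] = val

info = {'attributes': {'enclosure-serial-number': [' J300ABCD ']}}  # sample data
fill_if_absent(info, 'serialnumber', 'enclosure-serial-number')
assert info['serialnumber'] == 'J300ABCD'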
@@ -1340,7 +1344,7 @@ def start_detection():
     if rechecker is None:
         rechecktime = util.monotonic_time() + 900
         rechecker = eventlet.spawn_after(900, _periodic_recheck, cfg)
-    eventlet.spawn_n(ssdp.snoop, None, None, ssdp, get_node_by_uuid_or_mac)
+    eventlet.spawn_n(ssdp.snoop, safe_detected, None, ssdp, get_node_by_uuid_or_mac)

 def stop_autosense():
     for watcher in list(autosensors):
@@ -30,15 +30,6 @@ import struct
 getaddrinfo = eventlet.support.greendns.getaddrinfo


-def fixup_uuid(uuidprop):
-    baduuid = ''.join(uuidprop.split())
-    uuidprefix = (baduuid[:8], baduuid[8:12], baduuid[12:16])
-    a = codecs.encode(struct.pack('<IHH', *[int(x, 16) for x in uuidprefix]), 'hex')
-    a = util.stringify(a)
-    uuid = (a[:8], a[8:12], a[12:16], baduuid[16:20], baduuid[20:])
-    return '-'.join(uuid).upper()
-
-
 class LockedUserException(Exception):
     pass

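The deleted fixup_uuid compensated for SLP reporting the SMBIOS UUID with its first three fields in little-endian byte order; with detection presumably moving to /DeviceDescription.json, which reports the UUID in natural order, the swap is no longer needed here. A self-contained copy with a worked example of what it did (util.stringify is replaced by a plain decode for the sketch):

import codecs
import struct

def fixup_uuid(uuidprop):
    # The first three UUID fields arrive little-endian (SMBIOS wire order);
    # repack them big-endian, then insert the canonical dashes.
    baduuid = ''.join(uuidprop.split())
    uuidprefix = (baduuid[:8], baduuid[8:12], baduuid[12:16])
    a = codecs.encode(struct.pack(
        '<IHH', *[int(x, 16) for x in uuidprefix]), 'hex').decode('utf8')
    uuid = (a[:8], a[8:12], a[12:16], baduuid[16:20], baduuid[20:])
    return '-'.join(uuid).upper()

# 0x00112233 stored little-endian reads back as 33221100, and so on:
assert fixup_uuid('00112233445566778899aabbccddeeff') == \
    '33221100-5544-7766-8899-AABBCCDDEEFF'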
@@ -63,6 +54,9 @@ class NodeHandler(immhandler.NodeHandler):
         # This is not adequate for being satisfied
         return bool(info.get('attributes', {}))

+    def probe(self):
+        return None
+
     def scan(self):
         c = webclient.SecureHTTPConnection(self.ipaddr, 443,
                                            verifycallback=self.validate_cert)
@@ -70,7 +64,40 @@ class NodeHandler(immhandler.NodeHandler):
         modelname = i.get('items', [{}])[0].get('machine_name', None)
         if modelname:
             self.info['modelname'] = modelname
         super(NodeHandler, self).scan()
+        for attrname in list(self.info.get('attributes', {})):
+            val = self.info['attributes'][attrname]
+            if '-uuid' == attrname[-5:] and len(val) == 32:
+                val = val.lower()
+                self.info['attributes'][attrname] = '-'.join([val[:8], val[8:12], val[12:16], val[16:20], val[20:]])
+        attrs = self.info.get('attributes', {})
+        room = attrs.get('room-id', None)
+        if room:
+            self.info['room'] = room
+        rack = attrs.get('rack-id', None)
+        if rack:
+            self.info['rack'] = rack
+        name = attrs.get('name', None)
+        if name:
+            self.info['hostname'] = name
+        unumber = attrs.get('lowest-u', None)
+        if unumber:
+            self.info['u'] = unumber
+        location = attrs.get('location', None)
+        if location:
+            self.info['location'] = location
+        mtm = attrs.get('enclosure-machinetype-model', None)
+        if mtm:
+            self.info['modelnumber'] = mtm.strip()
+        sn = attrs.get('enclosure-serial-number', None)
+        if sn:
+            self.info['serialnumber'] = sn.strip()
+        if attrs.get('enclosure-form-factor', None) == 'dense-computing':
+            encuuid = attrs.get('chassis-uuid', None)
+            if encuuid:
+                self.info['enclosure.uuid'] = encuuid
+            slot = int(attrs.get('slot', 0))
+            if slot != 0:
+                self.info['enclosure.bay'] = slot

     def preconfig(self, possiblenode):
         self.tmpnodename = possiblenode
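The new scan() body normalizes any 32-character '*-uuid' attribute into canonical dashed 8-4-4-4-12 form before use; for example (the value below is illustrative):

val = '20aef8b0c98211e9a3a2001a4aab2a02'  # illustrative 32-char hex uuid
canonical = '-'.join([val[:8], val[8:12], val[12:16], val[16:20], val[20:]])
assert canonical == '20aef8b0-c982-11e9-a3a2-001a4aab2a02'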
@@ -499,7 +526,7 @@ class NodeHandler(immhandler.NodeHandler):
         ff = self.info.get('attributes', {}).get('enclosure-form-factor', '')
         if ff not in ('dense-computing', [u'dense-computing']):
             return
-        enclosureuuid = self.info.get('attributes', {}).get('chassis-uuid', [None])[0]
+        enclosureuuid = self.info.get('enclosure.uuid', None)
         if enclosureuuid:
             enclosureuuid = enclosureuuid.lower()
             em = self.configmanager.get_node_attributes(nodename,
@@ -31,7 +31,7 @@ _slp_services = set([
     'service:lenovo-smm2',
     'service:ipmi',
     'service:lighttpd',
-    'service:management-hardware.Lenovo:lenovo-xclarity-controller',
+    #'service:management-hardware.Lenovo:lenovo-xclarity-controller',
     'service:management-hardware.IBM:chassis-management-module',
     'service:management-hardware.Lenovo:chassis-management-module',
     'service:io-device.Lenovo:management-module',
@@ -39,14 +39,12 @@ import eventlet
 import eventlet.green.select as select
 import eventlet.green.socket as socket
 import eventlet.greenpool as gp
 import os
 import time
-try:
-    from eventlet.green.urllib.request import urlopen
-except (ImportError, AssertionError):
-    from eventlet.green.urllib2 import urlopen
 import struct
 import traceback

+webclient = eventlet.import_patched('pyghmi.util.webclient')
 mcastv4addr = '239.255.255.250'
 mcastv6addr = 'ff02::c'
@@ -123,6 +121,11 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
     # dabbling in multicast wizardry here, such sockets can cause big problems,
     # so we will have two distinct sockets
     tracelog = log.Logger('trace')
+    try:
+        active_scan(handler, protocol)
+    except Exception as e:
+        tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
+                     event=log.Events.stacktrace)
     known_peers = set([])
     net6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
     net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
@@ -167,8 +170,9 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
                 continue
             mac = neighutil.get_hwaddr(peer[0])
             if not mac:
+                probepeer = (peer[0], struct.unpack('H', os.urandom(2))[0] | 1025) + peer[2:]
                 try:
-                    s.sendto(b'\x00', peer)
+                    s.sendto(b'\x00', probepeer)
                 except Exception:
                     continue
                 deferrednotifies.append((peer, rsp))
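The probepeer change is a neighbor-table priming trick: when the peer's MAC is not yet known, a throwaway datagram is sent to a random unprivileged port (OR-ing with 1025 keeps the port nonzero and above the privileged range), forcing the kernel to perform ARP/NDP resolution so the MAC can be read from the neighbor table once the deferred notification is parsed. A sketch of the idea in isolation (function name is hypothetical):

import os
import socket
import struct

def prime_neighbor(sock, peer):
    # Send one throwaway byte to a random unprivileged port; the payload is
    # irrelevant, the point is to make the kernel resolve the peer's MAC
    # into the ARP/NDP neighbor table.
    port = struct.unpack('H', os.urandom(2))[0] | 1025
    probepeer = (peer[0], port) + peer[2:]
    try:
        sock.sendto(b'\x00', probepeer)
    except Exception:
        return False
    return True

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
prime_neighbor(s, ('192.0.2.10', 1900))  # illustrative peer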
@@ -244,6 +248,8 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
         r = r[0]
+        if deferrednotifies:
+            eventlet.sleep(2.2)
         for peerrsp in deferrednotifies:
             peer, rsp = peerrsp
             mac = neighutil.get_hwaddr(peer[0])
             if not mac:
                 continue
@@ -257,6 +263,12 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
                          event=log.Events.stacktrace)


+def _get_svrip(peerdata):
+    for addr in peerdata['addresses']:
+        if addr[0].startswith('fe80::'):
+            return addr[0]
+    return peerdata['addresses'][0][0]
+
 def _find_service(service, target):
     net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     net6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
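_get_svrip prefers an IPv6 link-local address when one was recorded, since link-local reachability does not depend on routed network configuration; a quick illustration (addresses are illustrative):

def _get_svrip(peerdata):  # copy of the helper above, for illustration
    for addr in peerdata['addresses']:
        if addr[0].startswith('fe80::'):
            return addr[0]
    return peerdata['addresses'][0][0]

peerdata = {'addresses': [('10.0.0.5', 1900), ('fe80::5652:ff:fe84:1', 1900)]}
assert _get_svrip(peerdata) == 'fe80::5652:ff:fe84:1'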
@@ -314,8 +326,9 @@ def _find_service(service, target):
         for s in r:
             (rsp, peer) = s.recvfrom(9000)
             if not neighutil.get_hwaddr(peer[0]):
+                probepeer = (peer[0], struct.unpack('H', os.urandom(2))[0] | 1025) + peer[2:]
                 try:
-                    s.sendto(b'\x00', peer)
+                    s.sendto(b'\x00', probepeer)
                 except Exception:
                     continue
                 deferparse.append((rsp, peer))
@@ -333,25 +346,45 @@ def _find_service(service, target):
     querypool = gp.GreenPool()
     pooltargs = []
     for nid in peerdata:
-        for url in peerdata[nid].get('urls', ()):
-            if url.endswith('/desc.tmpl'):
-                pooltargs.append((url, peerdata[nid]))
-    for pi in querypool.imap(check_cpstorage, pooltargs):
+        if '/redfish/v1/' not in peerdata[nid].get('urls', ()):
+            break
+        if '/DeviceDescription.json' in peerdata[nid]['urls']:
+            pooltargs.append(('/DeviceDescription.json', peerdata[nid]))
+        # For now, don't interrogate generic redfish bmcs
+        # This is due to a need to deduplicate from some supported SLP
+        # targets (IMM, TSM, others)
+        # activate this else once the core filters/merges duplicate uuid
+        # or we drop support for those devices
+        #else:
+        #    pooltargs.append(('/redfish/v1/', peerdata[nid]))
+    for pi in querypool.imap(check_fish, pooltargs):
         if pi is not None:
             yield pi


-def check_cpstorage(urldata):
+def check_fish(urldata):
     url, data = urldata
     try:
-        info = urlopen(url, timeout=1).read()
-        if b'<friendlyName>Athena</friendlyName>' in info:
-            data['services'] = ['service:thinkagile-storage']
+        wc = webclient.SecureHTTPConnection(_get_svrip(data), 443, verifycallback=lambda x: True)
+        peerinfo = wc.grab_json_response(url)
+        if url == '/DeviceDescription.json':
+            try:
+                peerinfo = peerinfo[0]
+                myuuid = peerinfo['node-uuid'].lower()
+                if '-' not in myuuid:
+                    myuuid = '-'.join([myuuid[:8], myuuid[8:12], myuuid[12:16], myuuid[16:20], myuuid[20:]])
+                data['uuid'] = myuuid
+                data['attributes'] = peerinfo
+                data['services'] = ['service:management-hardware.Lenovo:lenovo-xclarity-controller']
+                return data
+            except (IndexError, KeyError):
+                url = '/redfish/v1/'
+                peerinfo = wc.grab_json_response('/redfish/v1/')
+        if url == '/redfish/v1/':
+            if 'UUID' in peerinfo:
+                data['services'] = ['service:redfish-bmc']
+                data['uuid'] = peerinfo['UUID'].lower()
+                return data
     except Exception:
         pass
     return None


 def _parse_ssdp(peer, rsp, peerdata):
     nid = peer[0]
     mac = None
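check_fish takes a (url, peerdata) tuple, fetches the URL over HTTPS from the address _get_svrip selects, and, for /DeviceDescription.json, maps the reply into uuid/attributes/services for the XCC handler, falling back to a plain /redfish/v1/ UUID probe. A hypothetical invocation (the address and peerdata shape are illustrative):

# Hypothetical call; 192.0.2.10 is a documentation address standing in for
# a discovered XCC. A None result means neither probe identified the peer.
peerdata = {'addresses': [('192.0.2.10', 1900)]}
result = check_fish(('/DeviceDescription.json', peerdata))
if result is not None:
    print(result['uuid'], result['services'])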
@@ -378,9 +411,11 @@ def _parse_ssdp(peer, rsp, peerdata):
         if not headline:
             continue
         header, _, value = headline.partition(b':')
-        header = header.strip()
-        value = value.strip()
+        header = header.strip().decode('utf8')
+        value = value.strip().decode('utf8')
         if header == 'AL' or header == 'LOCATION':
+            value = value[value.index('://')+3:]
+            value = value[value.index('/'):]
             if 'urls' not in peerdatum:
                 peerdatum['urls'] = [value]
             elif value not in peerdatum['urls']:
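The two added lines reduce an AL/LOCATION URL to its path component, which is what check_fish later requests relative to the peer's address; for example (the URL is illustrative):

value = 'http://192.0.2.10:8889/DeviceDescription.json'  # illustrative
value = value[value.index('://')+3:]  # '192.0.2.10:8889/DeviceDescription.json'
value = value[value.index('/'):]      # '/DeviceDescription.json'
assert value == '/DeviceDescription.json'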