mirror of
https://github.com/xcat2/confluent.git
synced 2026-05-01 04:47:45 +00:00
Fold aiohmi into confluent
If someone asks for it independently, we can break it out again. But for now, assume it's only for confluent.
This commit is contained in:
0
confluent_server/aiohmi/__init__.py
Normal file
0
confluent_server/aiohmi/__init__.py
Normal file
0
confluent_server/aiohmi/cmd/__init__.py
Normal file
0
confluent_server/aiohmi/cmd/__init__.py
Normal file
97
confluent_server/aiohmi/cmd/fakebmc.py
Executable file
97
confluent_server/aiohmi/cmd/fakebmc.py
Executable file
@@ -0,0 +1,97 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""this is a quick sample of how to write something that acts like a bmc
|
||||
to play:
|
||||
run fakebmc
|
||||
# ipmitool -I lanplus -U admin -P password -H 127.0.0.1 power status
|
||||
Chassis Power is off
|
||||
# ipmitool -I lanplus -U admin -P password -H 127.0.0.1 power on
|
||||
Chassis Power Control: Up/On
|
||||
# ipmitool -I lanplus -U admin -P password -H 127.0.0.1 power status
|
||||
Chassis Power is on
|
||||
# ipmitool -I lanplus -U admin -P password -H 127.0.0.1 mc reset cold
|
||||
Sent cold reset command to MC
|
||||
(fakebmc exits)
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
|
||||
import aiohmi.ipmi.bmc as bmc
|
||||
|
||||
|
||||
class FakeBmc(bmc.Bmc):
|
||||
def __init__(self, authdata, port):
|
||||
super(FakeBmc, self).__init__(authdata, port)
|
||||
self.powerstate = 'off'
|
||||
self.bootdevice = 'default'
|
||||
|
||||
def get_boot_device(self):
|
||||
return self.bootdevice
|
||||
|
||||
def set_boot_device(self, bootdevice):
|
||||
self.bootdevice = bootdevice
|
||||
|
||||
def cold_reset(self):
|
||||
# Reset of the BMC, not managed system, here we will exit the demo
|
||||
print('shutting down in response to BMC cold reset request')
|
||||
sys.exit(0)
|
||||
|
||||
def get_power_state(self):
|
||||
return self.powerstate
|
||||
|
||||
def power_off(self):
|
||||
# this should be power down without waiting for clean shutdown
|
||||
self.powerstate = 'off'
|
||||
print('abruptly remove power')
|
||||
|
||||
def power_on(self):
|
||||
self.powerstate = 'on'
|
||||
print('powered on')
|
||||
|
||||
def power_reset(self):
|
||||
pass
|
||||
|
||||
def power_shutdown(self):
|
||||
# should attempt a clean shutdown
|
||||
print('politely shut down the system')
|
||||
self.powerstate = 'off'
|
||||
|
||||
def is_active(self):
|
||||
return self.powerstate == 'on'
|
||||
|
||||
def iohandler(self, data):
|
||||
print(data)
|
||||
if self.sol:
|
||||
self.sol.send_data(data)
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
prog='fakebmc',
|
||||
description='Pretend to be a BMC',
|
||||
)
|
||||
parser.add_argument('--port',
|
||||
dest='port',
|
||||
type=int,
|
||||
default=623,
|
||||
help='Port to listen on; defaults to 623')
|
||||
args = parser.parse_args()
|
||||
mybmc = FakeBmc({'admin': 'password'}, port=args.port)
|
||||
mybmc.listen()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
88
confluent_server/aiohmi/cmd/pyghmicons.py
Executable file
88
confluent_server/aiohmi/cmd/pyghmicons.py
Executable file
@@ -0,0 +1,88 @@
|
||||
# Copyright 2013 IBM Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" A simple little script to exemplify/test ipmi.console module """
|
||||
|
||||
import fcntl
|
||||
import os
|
||||
import select
|
||||
import sys
|
||||
import termios
|
||||
import threading
|
||||
import tty
|
||||
|
||||
|
||||
from aiohmi.ipmi import console
|
||||
|
||||
|
||||
def _doinput(sol):
|
||||
while True:
|
||||
select.select((sys.stdin,), (), (), 600)
|
||||
try:
|
||||
data = sys.stdin.read()
|
||||
except (IOError, OSError) as e:
|
||||
if e.errno == 11:
|
||||
continue
|
||||
raise
|
||||
|
||||
sol.send_data(data)
|
||||
|
||||
|
||||
def _print(data):
|
||||
bailout = False
|
||||
if not isinstance(data, str):
|
||||
bailout = True
|
||||
data = repr(data)
|
||||
sys.stdout.write(data)
|
||||
sys.stdout.flush()
|
||||
if bailout:
|
||||
raise Exception(data)
|
||||
|
||||
|
||||
def main():
|
||||
tcattr = termios.tcgetattr(sys.stdin)
|
||||
newtcattr = tcattr
|
||||
# TODO(jbjohnso): add our exit handler
|
||||
newtcattr[-1][termios.VINTR] = 0
|
||||
newtcattr[-1][termios.VSUSP] = 0
|
||||
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, newtcattr)
|
||||
|
||||
tty.setraw(sys.stdin.fileno())
|
||||
currfl = fcntl.fcntl(sys.stdin.fileno(), fcntl.F_GETFL)
|
||||
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, currfl | os.O_NONBLOCK)
|
||||
|
||||
try:
|
||||
if sys.argv[3] is None:
|
||||
passwd = os.environ['IPMIPASSWORD']
|
||||
else:
|
||||
passwd_file = sys.argv[3]
|
||||
with open(passwd_file, "r") as f:
|
||||
passwd = f.read()
|
||||
|
||||
sol = console.Console(bmc=sys.argv[1], userid=sys.argv[2],
|
||||
password=passwd, iohandler=_print, force=True)
|
||||
inputthread = threading.Thread(target=_doinput, args=(sol,))
|
||||
inputthread.daemon = True
|
||||
inputthread.start()
|
||||
sol.main_loop()
|
||||
|
||||
except Exception:
|
||||
currfl = fcntl.fcntl(sys.stdin.fileno(), fcntl.F_GETFL)
|
||||
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, currfl ^ os.O_NONBLOCK)
|
||||
termios.tcsetattr(sys.stdin, termios.TCSANOW, tcattr)
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
92
confluent_server/aiohmi/cmd/pyghmiutil.py
Executable file
92
confluent_server/aiohmi/cmd/pyghmiutil.py
Executable file
@@ -0,0 +1,92 @@
|
||||
# Copyright 2013 IBM Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""This is an example of using the library in a synchronous fashion. For now,
|
||||
it isn't conceived as a general utility to actually use, just help developers
|
||||
understand how the ipmi_command class workes.
|
||||
"""
|
||||
|
||||
import functools
|
||||
import os
|
||||
import sys
|
||||
|
||||
from aiohmi.ipmi import command
|
||||
|
||||
|
||||
def docommand(args, result, ipmisession):
|
||||
command = args[0]
|
||||
args = args[1:]
|
||||
print("Logged into %s" % ipmisession.bmc)
|
||||
if 'error' in result:
|
||||
print(result['error'])
|
||||
return
|
||||
if command == 'power':
|
||||
if args:
|
||||
print(ipmisession.set_power(args[0], wait=True))
|
||||
else:
|
||||
value = ipmisession.get_power()
|
||||
print("%s: %s" % (ipmisession.bmc, value['powerstate']))
|
||||
elif command == 'bootdev':
|
||||
if args:
|
||||
print(ipmisession.set_bootdev(args[0]))
|
||||
else:
|
||||
print(ipmisession.get_bootdev())
|
||||
elif command == 'sensors':
|
||||
for reading in ipmisession.get_sensor_data():
|
||||
print(reading)
|
||||
elif command == 'health':
|
||||
print(ipmisession.get_health())
|
||||
elif command == 'inventory':
|
||||
for item in ipmisession.get_inventory():
|
||||
print(item)
|
||||
elif command == 'leds':
|
||||
for led in ipmisession.get_leds():
|
||||
print(led)
|
||||
elif command == 'graphical':
|
||||
print(ipmisession.get_graphical_console())
|
||||
elif command == 'net':
|
||||
print(ipmisession.get_net_configuration())
|
||||
elif command == 'raw':
|
||||
print(ipmisession.raw_command(
|
||||
netfn=int(args[0]),
|
||||
command=int(args[1]),
|
||||
data=map(lambda x: int(x, 16), args[2:])))
|
||||
|
||||
|
||||
def main():
|
||||
if (len(sys.argv) < 3) or 'IPMIPASSWORD' not in os.environ:
|
||||
print("Usage:")
|
||||
print(" IPMIPASSWORD=password %s bmc username <cmd> <optarg>" %
|
||||
sys.argv[0])
|
||||
return 1
|
||||
|
||||
password = os.environ['IPMIPASSWORD']
|
||||
os.environ['IPMIPASSWORD'] = ""
|
||||
bmc = sys.argv[1]
|
||||
userid = sys.argv[2]
|
||||
|
||||
bmcs = bmc.split(',')
|
||||
ipmicmd = None
|
||||
for bmc in bmcs:
|
||||
# NOTE(etingof): is it right to have `ipmicmd` overridden?
|
||||
ipmicmd = command.Command(
|
||||
bmc=bmc, userid=userid, password=password,
|
||||
onlogon=functools.partial(docommand, sys.argv[3:]))
|
||||
|
||||
if ipmicmd:
|
||||
ipmicmd.eventloop()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
161
confluent_server/aiohmi/cmd/virshbmc.py
Executable file
161
confluent_server/aiohmi/cmd/virshbmc.py
Executable file
@@ -0,0 +1,161 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""This is a simple, but working proof of concept of using aiohmi.ipmi.bmc to
|
||||
control a VM
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
import threading
|
||||
|
||||
import libvirt
|
||||
|
||||
import aiohmi.ipmi.bmc as bmc
|
||||
|
||||
|
||||
def lifecycle_callback(connection, domain, event, detail, console):
|
||||
console.state = console.domain.state(0)
|
||||
|
||||
|
||||
def error_handler(unused, error):
|
||||
if (error[0] == libvirt.VIR_ERR_RPC
|
||||
and error[1] == libvirt.VIR_FROM_STREAMS):
|
||||
return
|
||||
|
||||
|
||||
def stream_callback(stream, events, console):
|
||||
try:
|
||||
data = console.stream.recv(1024)
|
||||
except Exception:
|
||||
return
|
||||
if console.sol:
|
||||
console.sol.send_data(data)
|
||||
|
||||
|
||||
class LibvirtBmc(bmc.Bmc):
|
||||
"""A class to provide an IPMI interface to the VirtualBox APIs."""
|
||||
|
||||
def __init__(self, authdata, hypervisor, domain, port):
|
||||
super(LibvirtBmc, self).__init__(authdata, port)
|
||||
# Rely on libvirt to throw on bad data
|
||||
self.conn = libvirt.open(hypervisor)
|
||||
self.name = domain
|
||||
self.domain = self.conn.lookupByName(domain)
|
||||
self.state = self.domain.state(0)
|
||||
self.stream = None
|
||||
self.run_console = False
|
||||
self.conn.domainEventRegister(lifecycle_callback, self)
|
||||
self.sol_thread = None
|
||||
|
||||
def cold_reset(self):
|
||||
# Reset of the BMC, not managed system, here we will exit the demo
|
||||
print('shutting down in response to BMC cold reset request')
|
||||
sys.exit(0)
|
||||
|
||||
def get_power_state(self):
|
||||
if self.domain.isActive():
|
||||
return 'on'
|
||||
else:
|
||||
return 'off'
|
||||
|
||||
def power_off(self):
|
||||
if not self.domain.isActive():
|
||||
return 0xd5 # Not valid in this state
|
||||
self.domain.destroy()
|
||||
|
||||
def power_on(self):
|
||||
if self.domain.isActive():
|
||||
return 0xd5 # Not valid in this state
|
||||
self.domain.create()
|
||||
|
||||
def power_reset(self):
|
||||
if not self.domain.isActive():
|
||||
return 0xd5 # Not valid in this state
|
||||
self.domain.reset()
|
||||
|
||||
def power_shutdown(self):
|
||||
if not self.domain.isActive():
|
||||
return 0xd5 # Not valid in this state
|
||||
self.domain.shutdown()
|
||||
|
||||
def is_active(self):
|
||||
return self.domain.isActive()
|
||||
|
||||
def check_console(self):
|
||||
if (self.state[0] == libvirt.VIR_DOMAIN_RUNNING
|
||||
or self.state[0] == libvirt.VIR_DOMAIN_PAUSED):
|
||||
if self.stream is None:
|
||||
self.stream = self.conn.newStream(libvirt.VIR_STREAM_NONBLOCK)
|
||||
self.domain.openConsole(None, self.stream, 0)
|
||||
self.stream.eventAddCallback(libvirt.VIR_STREAM_EVENT_READABLE,
|
||||
stream_callback, self)
|
||||
else:
|
||||
if self.stream:
|
||||
self.stream.eventRemoveCallback()
|
||||
self.stream = None
|
||||
|
||||
return self.run_console
|
||||
|
||||
def activate_payload(self, request, session):
|
||||
super(LibvirtBmc, self).activate_payload(request, session)
|
||||
self.run_console = True
|
||||
self.sol_thread = threading.Thread(target=self.loop)
|
||||
self.sol_thread.start()
|
||||
|
||||
def deactivate_payload(self, request, session):
|
||||
self.run_console = False
|
||||
self.sol_thread.join()
|
||||
super(LibvirtBmc, self).deactivate_payload(request, session)
|
||||
|
||||
def iohandler(self, data):
|
||||
if self.stream:
|
||||
self.stream.send(data)
|
||||
|
||||
def loop(self):
|
||||
while self.check_console():
|
||||
libvirt.virEventRunDefaultImpl()
|
||||
|
||||
|
||||
def main():
|
||||
parser = argparse.ArgumentParser(
|
||||
prog='virshbmc',
|
||||
description='Pretend to be a BMC and proxy to virsh',
|
||||
formatter_class=argparse.ArgumentDefaultsHelpFormatter
|
||||
)
|
||||
parser.add_argument('--port',
|
||||
dest='port',
|
||||
type=int,
|
||||
default=623,
|
||||
help='(UDP) port to listen on')
|
||||
parser.add_argument('--connect',
|
||||
dest='hypervisor',
|
||||
default='qemu:///system',
|
||||
help='The hypervisor to connect to')
|
||||
parser.add_argument('--domain',
|
||||
dest='domain',
|
||||
required=True,
|
||||
help='The name of the domain to manage')
|
||||
args = parser.parse_args()
|
||||
|
||||
libvirt.virEventRegisterDefaultImpl()
|
||||
libvirt.registerErrorHandler(error_handler, None)
|
||||
|
||||
mybmc = LibvirtBmc({'admin': 'password'},
|
||||
hypervisor=args.hypervisor,
|
||||
domain=args.domain,
|
||||
port=args.port)
|
||||
mybmc.listen()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main())
|
||||
18
confluent_server/aiohmi/constants.py
Normal file
18
confluent_server/aiohmi/constants.py
Normal file
@@ -0,0 +1,18 @@
|
||||
# Copyright 2014 IBM Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class Health(object):
|
||||
Ok = 0
|
||||
Warning, Critical, Failed = [2**x for x in range(0, 3)]
|
||||
74
confluent_server/aiohmi/exceptions.py
Normal file
74
confluent_server/aiohmi/exceptions.py
Normal file
@@ -0,0 +1,74 @@
|
||||
# Copyright 2013 IBM Corporation
|
||||
# Copyright 2015-2017 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# The Exceptions that Pyghmi can throw
|
||||
|
||||
|
||||
class PyghmiException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class IpmiException(PyghmiException):
|
||||
def __init__(self, text='', code=0):
|
||||
super(IpmiException, self).__init__(text)
|
||||
self.ipmicode = code
|
||||
|
||||
|
||||
class RedfishError(PyghmiException):
|
||||
def __init__(self, text='', msgid=None):
|
||||
super(RedfishError, self).__init__(text)
|
||||
self.msgid = msgid
|
||||
|
||||
|
||||
class UnrecognizedCertificate(Exception):
|
||||
def __init__(self, text='', certdata=None):
|
||||
super(UnrecognizedCertificate, self).__init__(text)
|
||||
self.certdata = certdata
|
||||
|
||||
|
||||
class TemporaryError(Exception):
|
||||
# A temporary condition that should clear, but warrants reporting to the
|
||||
# caller
|
||||
pass
|
||||
|
||||
|
||||
class InvalidParameterValue(PyghmiException):
|
||||
pass
|
||||
|
||||
|
||||
class BmcErrorException(IpmiException):
|
||||
# This denotes when library detects an invalid BMC behavior
|
||||
pass
|
||||
|
||||
|
||||
class UnsupportedFunctionality(PyghmiException):
|
||||
# Indicates when functionality is requested that is not supported by
|
||||
# current endpoint
|
||||
pass
|
||||
|
||||
|
||||
class BypassGenericBehavior(PyghmiException):
|
||||
# Indicates that an OEM handler wants to abort any standards based
|
||||
# follow up
|
||||
pass
|
||||
|
||||
|
||||
class FallbackData(PyghmiException):
|
||||
# Indicates the OEM handler has data to be used if the generic
|
||||
# check comes up empty
|
||||
def __init__(self, fallbackdata):
|
||||
self.fallbackdata = fallbackdata
|
||||
|
||||
pass
|
||||
0
confluent_server/aiohmi/ipmi/__init__.py
Normal file
0
confluent_server/aiohmi/ipmi/__init__.py
Normal file
198
confluent_server/aiohmi/ipmi/bmc.py
Normal file
198
confluent_server/aiohmi/ipmi/bmc.py
Normal file
@@ -0,0 +1,198 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import struct
|
||||
import traceback
|
||||
|
||||
import aiohmi.ipmi.command as ipmicommand
|
||||
import aiohmi.ipmi.console as console
|
||||
import aiohmi.ipmi.private.serversession as serversession
|
||||
import aiohmi.ipmi.private.session as ipmisession
|
||||
|
||||
|
||||
__author__ = 'jjohnson2@lenovo.com'
|
||||
|
||||
|
||||
class Bmc(serversession.IpmiServer):
|
||||
|
||||
activated = False
|
||||
sol = None
|
||||
iohandler = None
|
||||
|
||||
def get_system_guid(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def cold_reset(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def power_off(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def power_on(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def power_cycle(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def power_reset(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def pulse_diag(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def power_shutdown(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def get_power_state(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def is_active(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def activate_payload(self, request, session):
|
||||
if self.iohandler is None:
|
||||
session.send_ipmi_response(code=0x81)
|
||||
elif not self.is_active():
|
||||
session.send_ipmi_response(code=0x81)
|
||||
elif self.activated:
|
||||
session.send_ipmi_response(code=0x80)
|
||||
else:
|
||||
self.activated = True
|
||||
solport = list(struct.unpack('BB', struct.pack('!H', self.port)))
|
||||
session.send_ipmi_response(
|
||||
data=[0, 0, 0, 0, 1, 0, 1, 0] + solport + [0xff, 0xff])
|
||||
self.sol = console.ServerConsole(session, self.iohandler)
|
||||
|
||||
def deactivate_payload(self, request, session):
|
||||
if self.iohandler is None:
|
||||
session.send_ipmi_response(code=0x81)
|
||||
elif not self.activated:
|
||||
session.send_ipmi_response(code=0x80)
|
||||
else:
|
||||
session.send_ipmi_response()
|
||||
self.sol.close()
|
||||
self.activated = False
|
||||
self.sol = None
|
||||
|
||||
@staticmethod
|
||||
def handle_missing_command(session):
|
||||
session.send_ipmi_response(code=0xc1)
|
||||
|
||||
def get_chassis_status(self, session):
|
||||
try:
|
||||
powerstate = self.get_power_state()
|
||||
except NotImplementedError:
|
||||
return session.send_ipmi_response(code=0xc1)
|
||||
if powerstate in ipmicommand.power_states:
|
||||
powerstate = ipmicommand.power_states[powerstate]
|
||||
if powerstate not in (0, 1):
|
||||
raise Exception('BMC implementation mistake')
|
||||
statusdata = [powerstate, 0, 0]
|
||||
session.send_ipmi_response(data=statusdata)
|
||||
|
||||
def control_chassis(self, request, session):
|
||||
rc = 0
|
||||
try:
|
||||
directive = request['data'][0]
|
||||
if directive == 0:
|
||||
rc = self.power_off()
|
||||
elif directive == 1:
|
||||
rc = self.power_on()
|
||||
elif directive == 2:
|
||||
rc = self.power_cycle()
|
||||
elif directive == 3:
|
||||
rc = self.power_reset()
|
||||
elif directive == 4:
|
||||
# i.e. Pulse a diagnostic interrupt(NMI) directly
|
||||
rc = self.pulse_diag()
|
||||
elif directive == 5:
|
||||
rc = self.power_shutdown()
|
||||
if rc is None:
|
||||
rc = 0
|
||||
session.send_ipmi_response(code=rc)
|
||||
except NotImplementedError:
|
||||
session.send_ipmi_response(code=0xcc)
|
||||
|
||||
def get_boot_device(self):
|
||||
raise NotImplementedError
|
||||
|
||||
def get_system_boot_options(self, request, session):
|
||||
if request['data'][0] == 5: # boot flags
|
||||
try:
|
||||
bootdevice = self.get_boot_device()
|
||||
except NotImplementedError:
|
||||
session.send_ipmi_response(data=[1, 5, 0, 0, 0, 0, 0])
|
||||
if (type(bootdevice) != int
|
||||
and bootdevice in ipmicommand.boot_devices):
|
||||
bootdevice = ipmicommand.boot_devices[bootdevice]
|
||||
paramdata = [1, 5, 0b10000000, bootdevice, 0, 0, 0]
|
||||
return session.send_ipmi_response(data=paramdata)
|
||||
else:
|
||||
session.send_ipmi_response(code=0x80)
|
||||
|
||||
def set_boot_device(self, bootdevice):
|
||||
raise NotImplementedError
|
||||
|
||||
def set_system_boot_options(self, request, session):
|
||||
if request['data'][0] in (0, 3, 4):
|
||||
# for now, just smile and nod at boot flag bit clearing
|
||||
# implementing it is a burden and implementing it does more to
|
||||
# confuse users than serve a useful purpose
|
||||
session.send_ipmi_response()
|
||||
elif request['data'][0] == 5:
|
||||
bootdevice = (request['data'][2] >> 2) & 0b1111
|
||||
try:
|
||||
bootdevice = ipmicommand.boot_devices[bootdevice]
|
||||
except KeyError:
|
||||
session.send_ipmi_response(code=0xcc)
|
||||
return
|
||||
self.set_boot_device(bootdevice)
|
||||
session.send_ipmi_response()
|
||||
else:
|
||||
raise NotImplementedError
|
||||
|
||||
def handle_raw_request(self, request, session):
|
||||
try:
|
||||
if request['netfn'] == 6:
|
||||
if request['command'] == 1: # get device id
|
||||
return self.send_device_id(session)
|
||||
elif request['command'] == 2: # cold reset
|
||||
return session.send_ipmi_response(code=self.cold_reset())
|
||||
elif request['command'] == 0x37: # get system guid
|
||||
guid = self.get_system_guid()
|
||||
return session.send_ipmi_response(code=0x00, data=guid.bytes_le)
|
||||
elif request['command'] == 0x48: # activate payload
|
||||
return self.activate_payload(request, session)
|
||||
elif request['command'] == 0x49: # deactivate payload
|
||||
return self.deactivate_payload(request, session)
|
||||
elif request['netfn'] == 0:
|
||||
if request['command'] == 1: # get chassis status
|
||||
return self.get_chassis_status(session)
|
||||
elif request['command'] == 2: # chassis control
|
||||
return self.control_chassis(request, session)
|
||||
elif request['command'] == 8: # set boot options
|
||||
return self.set_system_boot_options(request, session)
|
||||
elif request['command'] == 9: # get boot options
|
||||
return self.get_system_boot_options(request, session)
|
||||
session.send_ipmi_response(code=0xc1)
|
||||
except NotImplementedError:
|
||||
session.send_ipmi_response(code=0xc1)
|
||||
except Exception:
|
||||
session._send_ipmi_net_payload(code=0xff)
|
||||
traceback.print_exc()
|
||||
|
||||
@classmethod
|
||||
def listen(cls, timeout=30):
|
||||
while True:
|
||||
ipmisession.Session.wait_for_rsp(timeout)
|
||||
2314
confluent_server/aiohmi/ipmi/command.py
Normal file
2314
confluent_server/aiohmi/ipmi/command.py
Normal file
File diff suppressed because it is too large
Load Diff
551
confluent_server/aiohmi/ipmi/console.py
Normal file
551
confluent_server/aiohmi/ipmi/console.py
Normal file
@@ -0,0 +1,551 @@
|
||||
# Copyright 2014 IBM Corporation
|
||||
# Copyright 2015-2019 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
"""This represents the low layer message framing portion of IPMI"""
|
||||
|
||||
import struct
|
||||
import threading
|
||||
|
||||
import aiohmi.exceptions as exc
|
||||
from aiohmi.ipmi.private import constants
|
||||
from aiohmi.ipmi.private import session
|
||||
from aiohmi.ipmi.private.util import _monotonic_time
|
||||
|
||||
|
||||
class Console(object):
|
||||
"""IPMI SOL class.
|
||||
|
||||
This object represents an SOL channel, multiplexing SOL data with
|
||||
commands issued by ipmi.command.
|
||||
|
||||
:param bmc: hostname or ip address of BMC
|
||||
:param userid: username to use to connect
|
||||
:param password: password to connect to the BMC
|
||||
:param iohandler: Either a function to call with bytes, a filehandle to
|
||||
use for input and output, or a tuple of (input, output)
|
||||
handles
|
||||
:param force: Set to True to force on or False to force off
|
||||
:param kg: optional parameter for BMCs configured to require it
|
||||
"""
|
||||
|
||||
# TODO(jbjohnso): still need an exit and a data callin function
|
||||
def __init__(self, bmc, userid, password,
|
||||
iohandler, port=623,
|
||||
force=False, kg=None):
|
||||
self.outputlock = threading.RLock()
|
||||
self.keepaliveid = None
|
||||
self.connected = False
|
||||
self.broken = False
|
||||
self.out_handler = iohandler
|
||||
self.remseq = 0
|
||||
self.myseq = 0
|
||||
self.lastsize = 0
|
||||
self.retriedpayload = 0
|
||||
self.pendingoutput = []
|
||||
self.awaitingack = False
|
||||
self.activated = False
|
||||
self.force_session = force
|
||||
self.port = port
|
||||
self.ipmi_session = None
|
||||
self.callgotsession = None
|
||||
self.bmc = bmc
|
||||
self.userid = userid
|
||||
self.password = password
|
||||
self.port = port
|
||||
self.kg = kg
|
||||
self.broken = False
|
||||
|
||||
async def connect(self):
|
||||
bmc = self.bmc
|
||||
userid = self.userid
|
||||
password = self.password
|
||||
port = self.port
|
||||
kg = self.kg
|
||||
self.ipmi_session = await session.Session(
|
||||
bmc=bmc, userid=userid, password=password, port=port, kg=kg)
|
||||
# induce one iteration of the loop, now that we would be
|
||||
# prepared for it in theory
|
||||
await self._got_session({})
|
||||
|
||||
|
||||
async def _got_session(self, response):
|
||||
"""Private function to navigate SOL payload activation"""
|
||||
if 'error' in response:
|
||||
await self._print_error(response['error'])
|
||||
return
|
||||
if not self.ipmi_session:
|
||||
self.callgotsession = response
|
||||
return
|
||||
# Send activate sol payload directive
|
||||
# netfn= 6 (application)
|
||||
# command = 0x48 (activate payload)
|
||||
# data = (1, sol payload type
|
||||
# 1, first instance
|
||||
# 0b11000000, -encrypt, authenticate,
|
||||
# disable serial/modem alerts, CTS fine
|
||||
# 0, 0, 0 reserved
|
||||
response = await self.ipmi_session.raw_command(netfn=0x6, command=0x48,
|
||||
data=(1, 1, 192, 0, 0, 0))
|
||||
# given that these are specific to the command,
|
||||
# it's probably best if one can grep the error
|
||||
# here instead of in constants
|
||||
sol_activate_codes = {
|
||||
0x81: 'SOL is disabled',
|
||||
0x82: 'Maximum SOL session count reached',
|
||||
0x83: 'Cannot activate payload with encryption',
|
||||
0x84: 'Cannot activate payload without encryption',
|
||||
}
|
||||
if 'code' in response and response['code']:
|
||||
if response['code'] in constants.ipmi_completion_codes:
|
||||
await self._print_error(
|
||||
constants.ipmi_completion_codes[response['code']])
|
||||
return
|
||||
elif response['code'] == 0x80:
|
||||
if self.force_session and not self.retriedpayload:
|
||||
self.retriedpayload = 1
|
||||
sessrsp = await self.ipmi_session.raw_command(
|
||||
netfn=0x6,
|
||||
command=0x49,
|
||||
data=(1, 1, 0, 0, 0, 0))
|
||||
await self._got_session(sessrsp)
|
||||
return
|
||||
else:
|
||||
await self._print_error('SOL Session active for another client')
|
||||
return
|
||||
elif response['code'] in sol_activate_codes:
|
||||
await self._print_error(sol_activate_codes[response['code']])
|
||||
return
|
||||
else:
|
||||
await self._print_error(
|
||||
'SOL encountered Unrecognized error code %d' %
|
||||
response['code'])
|
||||
return
|
||||
if 'error' in response:
|
||||
self._print_error(response['error'])
|
||||
return
|
||||
self.activated = True
|
||||
# data[0:3] is reserved except for the test mode, which we don't use
|
||||
data = response['data']
|
||||
self.maxoutcount = (data[5] << 8) + data[4]
|
||||
# BMC tells us this is the maximum allowed size
|
||||
# data[6:7] is the promise of how small packets are going to be, but we
|
||||
# don't have any reason to worry about it
|
||||
# some BMCs disagree on the endianness, so do both
|
||||
valid_ports = (self.port, struct.unpack(
|
||||
'<H', struct.pack('>H', self.port))[0])
|
||||
if (data[8] + (data[9] << 8)) not in valid_ports:
|
||||
# TODO(jbjohnso): support atypical SOL port number
|
||||
raise NotImplementedError("Non-standard SOL Port Number")
|
||||
# ignore data[10:11] for now, the vlan detail, shouldn't matter to this
|
||||
# code anyway...
|
||||
# NOTE(jbjohnso):
|
||||
# We will use a special purpose keepalive
|
||||
if self.ipmi_session.sol_handler is not None:
|
||||
# If there is erroneously another SOL handler already, notify
|
||||
# it of newly established session
|
||||
await self.ipmi_session.sol_handler({'error': 'Session Disconnected'})
|
||||
self.keepaliveid = self.ipmi_session.register_keepalive(
|
||||
cmd={'netfn': 6, 'command': 0x4b, 'data': (1, 1)},
|
||||
callback=self._got_payload_instance_info)
|
||||
self.ipmi_session.sol_handler = self._got_sol_payload
|
||||
self.connected = True
|
||||
# self._sendpendingoutput() checks len(self._sendpendingoutput)
|
||||
await self._sendpendingoutput()
|
||||
|
||||
async def _got_payload_instance_info(self, response):
|
||||
if 'error' in response:
|
||||
self.activated = False
|
||||
await self._print_error(response['error'])
|
||||
return
|
||||
currowner = struct.unpack(
|
||||
"<I", struct.pack('4B', *response['data'][:4]))
|
||||
if currowner[0] != self.ipmi_session.sessionid:
|
||||
# the session is deactivated or active for something else
|
||||
self.activated = False
|
||||
await self._print_error('SOL deactivated')
|
||||
return
|
||||
# ok, still here, that means session is alive, but another
|
||||
# common issue is firmware messing with mux on reboot
|
||||
# this would be a nice thing to check, but the serial channel
|
||||
# number is needed and there isn't an obvious means to reliably
|
||||
# discern which channel or even *if* the serial port in question
|
||||
# correlates at all to an ipmi channel to check mux
|
||||
|
||||
def _addpendingdata(self, data):
|
||||
with self.outputlock:
|
||||
if isinstance(data, dict):
|
||||
self.pendingoutput.append(data)
|
||||
else: # it is a text situation
|
||||
if (len(self.pendingoutput) == 0
|
||||
or isinstance(self.pendingoutput[-1], dict)):
|
||||
self.pendingoutput.append(data)
|
||||
else:
|
||||
self.pendingoutput[-1] += data
|
||||
|
||||
def _got_cons_input(self, handle):
    """Callback for handle events detected by ipmi session.

    Reads whatever *handle* currently holds and queues it for
    transmission to the BMC.
    """
    self._addpendingdata(handle.read())
    if not self.awaitingack:
        # NOTE(review): _sendpendingoutput is a coroutine function; calling
        # it without await from this synchronous callback creates a
        # coroutine object that is never executed.  Confirm whether the
        # caller schedules it, or whether this callback should be async.
        self._sendpendingoutput()
async def close(self):
    """Shut down this SOL session.

    Unregisters the keepalive and, when the payload was activated over a
    live session, issues Deactivate Payload (netfn 6, cmd 0x49) on a
    best-effort basis.
    """
    if self.ipmi_session:
        self.ipmi_session.unregister_keepalive(self.keepaliveid)
    if self.activated and self.ipmi_session is not None:
        try:
            await self.ipmi_session.raw_command(
                netfn=6, command=0x49, data=(1, 1, 0, 0, 0, 0))
        except exc.IpmiException:
            # The underlying IPMI session is not working; treat the
            # deactivate as implicitly successful.
            pass
async def send_data(self, data):
    """Queue *data* for the BMC, transmitting at once when possible.

    Data is silently dropped once the console is broken; otherwise it is
    queued and flushed immediately unless we are still waiting on an ack
    or the channel is not yet connected.
    """
    if self.broken:
        return
    self._addpendingdata(data)
    if self.connected and not self.awaitingack:
        await self._sendpendingoutput()
async def send_break(self):
    """Queue a serial break request, flushing immediately when possible."""
    self._addpendingdata({'break': 1})
    if self.connected and not self.awaitingack:
        await self._sendpendingoutput()
@classmethod
def wait_for_rsp(cls, timeout):
    """Delay for no longer than timeout for next response.

    This acts like a sleep that exits on activity.

    :param timeout: Maximum number of seconds before returning
    """
    # Delegates to the global session event pump; returns whatever
    # session.Session.wait_for_rsp returns.
    return session.Session.wait_for_rsp(timeout=timeout)
async def _sendpendingoutput(self):
    """Transmit the head of the pending-output queue.

    Sends at most one chunk (bounded by self.maxoutcount) or one break
    request per call; the ack handling in _sendoutput drives the pacing
    of subsequent sends.
    """
    # NOTE(review): self.outputlock is a threading lock held across the
    # awaited _sendoutput call below; confirm no other coroutine on the
    # same event loop needs this lock while a send is in flight.
    with self.outputlock:
        dobreak = False
        chunk = ''
        if len(self.pendingoutput) == 0:
            return
        if isinstance(self.pendingoutput[0], dict):
            if 'break' in self.pendingoutput[0]:
                dobreak = True
            else:
                # Unrecognized control dict: drop it and raise.
                # NOTE(review): bare ValueError with no message — consider
                # describing the unexpected entry.
                del self.pendingoutput[0]
                raise ValueError
            del self.pendingoutput[0]
        elif len(self.pendingoutput[0]) > self.maxoutcount:
            # Too big for one SOL packet: send a prefix, requeue the rest.
            chunk = self.pendingoutput[0][:self.maxoutcount]
            self.pendingoutput[0] = self.pendingoutput[0][
                self.maxoutcount:]
        else:
            chunk = self.pendingoutput[0]
            del self.pendingoutput[0]
        await self._sendoutput(chunk, sendbreak=dobreak)
async def _sendoutput(self, output, sendbreak=False):
|
||||
self.myseq += 1
|
||||
self.myseq &= 0xf
|
||||
if self.myseq == 0:
|
||||
self.myseq = 1
|
||||
# currently we don't try to combine ack with outgoing data
|
||||
# so we use 0 for ack sequence number and accepted character
|
||||
# count
|
||||
breakbyte = 0
|
||||
if sendbreak:
|
||||
breakbyte = 0b10000
|
||||
try:
|
||||
payload = bytearray((self.myseq, 0, 0, breakbyte)) + output
|
||||
except TypeError: # bytearray hits unicode...
|
||||
payload = bytearray((self.myseq, 0, 0, breakbyte
|
||||
)) + output.encode('utf8')
|
||||
self.lasttextsize = len(output)
|
||||
needskeepalive = False
|
||||
if self.lasttextsize == 0:
|
||||
needskeepalive = True
|
||||
self.awaitingack = True
|
||||
self.lastpayload = payload
|
||||
await self.send_payload(payload, retry=False, needskeepalive=needskeepalive)
|
||||
retries = 5
|
||||
while retries and self.awaitingack:
|
||||
expiry = _monotonic_time() + 5.5 - retries
|
||||
while self.awaitingack and _monotonic_time() < expiry:
|
||||
await self.wait_for_rsp(0.5)
|
||||
if self.awaitingack:
|
||||
await self.send_payload(payload, retry=False,
|
||||
needskeepalive=needskeepalive)
|
||||
retries -= 1
|
||||
if not retries:
|
||||
await self._print_error('Connection lost')
|
||||
|
||||
async def send_payload(self, payload, payload_type=1, retry=True,
                       needskeepalive=False):
    """Forward an SOL payload to the underlying IPMI session.

    Waits (via the session-class event pump) until the console is either
    connected or broken, then raises IpmiException when the session has
    gone away; otherwise hands the payload to the session layer.

    :param payload: raw SOL payload bytes
    :param payload_type: IPMI payload type (1 is used for SOL data)
    :param retry: passed through to the session layer
    :param needskeepalive: passed through to the session layer
    :raises exc.IpmiException: when the session is gone or logged out
    """
    while not (self.connected or self.broken):
        # NOTE(review): this spins the session event pump without
        # awaiting the result — confirm wait_for_rsp is synchronous in
        # this code path.
        session.Session.wait_for_rsp(timeout=10)
    if self.ipmi_session is None or not self.ipmi_session.logged:
        await self._print_error('Session no longer connected')
        raise exc.IpmiException('Session no longer connected')
    await self.ipmi_session.send_payload(payload,
                                         payload_type=payload_type,
                                         retry=retry,
                                         needskeepalive=needskeepalive)
async def _print_info(self, info):
|
||||
await self._print_data({'info': info})
|
||||
|
||||
async def _print_error(self, error):
|
||||
self.broken = True
|
||||
if self.ipmi_session:
|
||||
self.ipmi_session.unregister_keepalive(self.keepaliveid)
|
||||
if (self.ipmi_session.sol_handler
|
||||
and self.ipmi_session.sol_handler.__self__ is self):
|
||||
self.ipmi_session.sol_handler = None
|
||||
self.ipmi_session = None
|
||||
if type(error) == dict:
|
||||
await self._print_data(error)
|
||||
else:
|
||||
await self._print_data({'error': error})
|
||||
|
||||
async def _print_data(self, data):
|
||||
"""Convey received data back to caller in the format of their choice.
|
||||
|
||||
Caller may elect to provide this class filehandle(s) or else give a
|
||||
callback function that this class will use to convey data back to
|
||||
caller.
|
||||
"""
|
||||
await self.out_handler(data)
|
||||
|
||||
async def _got_sol_payload(self, payload):
|
||||
"""SOL payload callback"""
|
||||
|
||||
# TODO(jbjohnso) test cases to throw some likely scenarios at functions
|
||||
# for example, retry with new data, retry with no new data
|
||||
# retry with unexpected sequence number
|
||||
if type(payload) == dict: # we received an error condition
|
||||
self.activated = False
|
||||
await self._print_error(payload)
|
||||
return
|
||||
newseq = payload[0] & 0b1111
|
||||
ackseq = payload[1] & 0b1111
|
||||
ackcount = payload[2]
|
||||
nacked = payload[3] & 0b1000000
|
||||
poweredoff = payload[3] & 0b100000
|
||||
deactivated = payload[3] & 0b10000
|
||||
breakdetected = payload[3] & 0b100
|
||||
# for now, ignore overrun. I assume partial NACK for this reason or
|
||||
# for no reason would be treated the same, new payload with partial
|
||||
# data.
|
||||
remdata = ""
|
||||
remdatalen = 0
|
||||
if newseq != 0: # this packet at least has some data to send to us..
|
||||
if len(payload) > 4:
|
||||
remdatalen = len(payload[4:]) # store remote len before dupe
|
||||
# retry logic, we must ack *this* many even if it is
|
||||
# a retry packet with new partial data
|
||||
remdata = bytes(payload[4:])
|
||||
if newseq == self.remseq: # it is a retry, but could have new data
|
||||
if remdatalen > self.lastsize:
|
||||
remdata = bytes(remdata[4 + self.lastsize:])
|
||||
else: # no new data...
|
||||
remdata = ""
|
||||
else: # TODO(jbjohnso) what if remote sequence number is wrong??
|
||||
self.remseq = newseq
|
||||
self.lastsize = remdatalen
|
||||
if remdata: # Do not subject callers to empty data
|
||||
await self._print_data(remdata)
|
||||
ackpayload = bytearray((0, self.remseq, remdatalen, 0))
|
||||
# Why not put pending data into the ack? because it's rare
|
||||
# and might be hard to decide what to do in the context of
|
||||
# retry situation
|
||||
try:
|
||||
await self.send_payload(ackpayload, retry=False)
|
||||
except exc.IpmiException:
|
||||
# if the session is broken, then close the SOL session
|
||||
self.close()
|
||||
if self.myseq != 0 and ackseq == self.myseq: # the bmc has something
|
||||
# to say about last xmit
|
||||
self.awaitingack = False
|
||||
if nacked and not breakdetected: # the BMC was in some way unhappy
|
||||
if poweredoff:
|
||||
await self._print_info("Remote system is powered down")
|
||||
if deactivated:
|
||||
self.activated = False
|
||||
await self._print_error("Remote IPMI console disconnected")
|
||||
else: # retry all or part of packet, but in a new form
|
||||
# also add pending output for efficiency and ease
|
||||
newtext = self.lastpayload[4 + ackcount:]
|
||||
with self.outputlock:
|
||||
if (self.pendingoutput
|
||||
and not isinstance(self.pendingoutput[0],
|
||||
dict)):
|
||||
self.pendingoutput[0] = \
|
||||
newtext + self.pendingoutput[0]
|
||||
else:
|
||||
self.pendingoutput = [newtext] + self.pendingoutput
|
||||
# self._sendpendingoutput() checks len(self._sendpendingoutput)
|
||||
await self._sendpendingoutput()
|
||||
elif ackseq != 0 and self.awaitingack:
|
||||
# if an ack packet came in, but did not match what we
|
||||
# expected, retry our payload now.
|
||||
# the situation that was triggered was a senseless retry
|
||||
# when data came in while we xmitted. In theory, a BMC
|
||||
# should handle a retry correctly, but some do not, so
|
||||
# try to mitigate by avoiding overeager retries
|
||||
# occasional retry of a packet
|
||||
# sooner than timeout suggests is evidently a big deal
|
||||
await self.send_payload(payload=self.lastpayload, retry=False)
|
||||
|
||||
def main_loop(self):
    """Process all events until no more sessions exist.

    If a caller is a simple little utility, provide a function to
    eternally run the event loop.  More complicated usage would be
    expected to provide its own event loop behavior.
    """
    # wait_for_rsp promises to return a false value when no sessions are
    # alive anymore
    # TODO(jbjohnso): wait_for_rsp is not returning a true value for our
    # own session
    while True:
        session.Session.wait_for_rsp(timeout=600)
class ServerConsole(Console):
    """IPMI SOL class, server side of the channel.

    This object represents an SOL channel, multiplexing SOL data with
    commands issued by ipmi.command.  Unlike Console, this side starts
    out connected and activated and answers the remote console's packets.

    :param _session: IPMI session
    :param iohandler: handler awaited with each chunk of received data
    :param force: whether to take over an SOL payload already in use
    """

    def __init__(self, _session, iohandler, force=False):
        # Note: deliberately does not call Console.__init__; state is set
        # up directly and the channel begins life connected and activated.
        self.outputlock = threading.RLock()
        self.keepaliveid = None
        self.connected = True
        self.broken = False
        self.out_handler = iohandler
        self.remseq = 0          # last sequence number seen from the peer
        self.myseq = 0           # our transmit sequence number (0 = none yet)
        self.lastsize = 0        # data length of the peer's last packet
        self.retriedpayload = 0
        self.pendingoutput = []  # queued outbound text/control dicts
        self.awaitingack = False
        self.activated = True
        self.force_session = force
        self.ipmi_session = _session
        self.ipmi_session.sol_handler = self._got_sol_payload
        self.maxoutcount = 256   # largest chunk we send per SOL packet
        self.poweredon = True

        session.Session.wait_for_rsp(0)

    async def _got_sol_payload(self, payload):
        """SOL payload callback"""

        # TODO(jbjohnso) test cases to throw some likely scenarios at functions
        # for example, retry with new data, retry with no new data
        # retry with unexpected sequence number
        if type(payload) == dict:  # we received an error condition
            self.activated = False
            await self._print_error(payload)
            return
        newseq = payload[0] & 0b1111
        ackseq = payload[1] & 0b1111
        ackcount = payload[2]
        nacked = payload[3] & 0b1000000
        breakdetected = payload[3] & 0b10000
        # for now, ignore overrun. I assume partial NACK for this reason or
        # for no reason would be treated the same, new payload with partial
        # data.
        remdata = ""
        remdatalen = 0
        # Status bits reported back to the peer in our ack; these mirror
        # the bit positions Console._got_sol_payload parses
        # (NACK+powered-off, NACK+deactivated respectively).
        flag = 0
        if not self.poweredon:
            flag |= 0b1100000
        if not self.activated:
            flag |= 0b1010000
        if newseq != 0:  # this packet at least has some data to send to us..
            if len(payload) > 4:
                remdatalen = len(payload[4:])  # store remote len before dupe
                # retry logic, we must ack *this* many even if it is
                # a retry packet with new partial data
                remdata = bytes(payload[4:])
            if newseq == self.remseq:  # it is a retry, but could have new data
                if remdatalen > self.lastsize:
                    # NOTE(review): remdata already excludes the 4-byte
                    # header, so the extra 4 here skips 4 additional data
                    # bytes — confirm against the peer's retry framing.
                    remdata = bytes(remdata[4 + self.lastsize:])
                else:  # no new data...
                    remdata = ""
            else:  # TODO(jbjohnso) what if remote sequence number is wrong??
                self.remseq = newseq
            self.lastsize = remdatalen
            ackpayload = bytearray((0, self.remseq, remdatalen, flag))
            # Why not put pending data into the ack? because it's rare
            # and might be hard to decide what to do in the context of
            # retry situation
            try:
                self.send_payload(ackpayload, retry=False)
            except exc.IpmiException:
                # if the session is broken, then close the SOL session
                self.close()
            if remdata:  # Do not subject callers to empty data
                await self._print_data(remdata)
        if self.myseq != 0 and ackseq == self.myseq:  # the bmc has something
            # to say about last xmit
            self.awaitingack = False
            if nacked and not breakdetected:  # the BMC was in some way unhappy
                newtext = self.lastpayload[4 + ackcount:]
                with self.outputlock:
                    if (self.pendingoutput
                            and not isinstance(self.pendingoutput[0], dict)):
                        self.pendingoutput[0] = newtext + self.pendingoutput[0]
                    else:
                        self.pendingoutput = [newtext] + self.pendingoutput
                # NOTE(review): _sendpendingoutput is inherited from
                # Console and is a coroutine function; invoked without
                # await this builds a coroutine that never runs — confirm
                # the intended scheduling here.
                self._sendpendingoutput()
        elif ackseq != 0 and self.awaitingack:
            # if an ack packet came in, but did not match what we
            # expected, retry our payload now.
            # the situation that was triggered was a senseless retry
            # when data came in while we xmitted. In theory, a BMC
            # should handle a retry correctly, but some do not, so
            # try to mitigate by avoiding overeager retries
            self.send_payload(payload=self.lastpayload)

    def send_payload(self, payload, payload_type=1, retry=True,
                     needskeepalive=False):
        # Synchronous override of Console.send_payload (which is async);
        # waits via the session event pump, then delegates to the session.
        while not (self.connected or self.broken):
            session.Session.wait_for_rsp(timeout=10)
        self.ipmi_session.send_payload(payload,
                                       payload_type=payload_type,
                                       retry=retry,
                                       needskeepalive=needskeepalive)

    def close(self):
        """Shut down an SOL session"""
        # NOTE(review): synchronous override of the async Console.close;
        # it only marks the payload inactive — no deactivate command is
        # sent and no keepalive is unregistered.  Confirm intended.
        self.activated = False
589
confluent_server/aiohmi/ipmi/events.py
Normal file
589
confluent_server/aiohmi/ipmi/events.py
Normal file
@@ -0,0 +1,589 @@
|
||||
# Copyright 2016 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import struct
|
||||
import time
|
||||
|
||||
import aiohmi.constants as pygconst
|
||||
import aiohmi.exceptions as pygexc
|
||||
|
||||
try:
|
||||
range = xrange
|
||||
except NameError:
|
||||
pass
|
||||
try:
|
||||
buffer
|
||||
except NameError:
|
||||
buffer = memoryview
|
||||
|
||||
|
||||
# Lookup tables translating raw IPMI event-data bytes into human-readable
# strings.  Indexes and meanings follow Table 42-3 of the IPMI
# specification (sensor-specific event extension data).

psucfg_errors = {
    0: 'Vendor mismatch',
    1: 'Revision mismatch',
    2: 'Processor missing',  # e.g. pluggable CPU VRMs...
    3: 'Insufficient power',
    4: 'Voltage mismatch',
}

firmware_progress = {
    0: 'Unspecified',
    1: 'Memory initialization',
    2: 'Disk initialization',
    3: 'Non-primary Processor initialization',
    4: 'User authentication',
    5: 'In setup',
    6: 'USB initialization',
    7: 'PCI initialization',
    8: 'Option ROM initialization',
    9: 'Video initialization',
    0xa: 'Cache initialization',
    0xb: 'SMBus initialization',
    0xc: 'Keyboard initialization',
    0xd: 'Embedded controller initialization',
    0xe: 'Docking station attachment',
    0xf: 'Docking station enabled',
    0x10: 'Docking station ejection',
    0x11: 'Docking station disabled',
    0x12: 'Waking OS',
    0x13: 'Starting OS boot',
    0x14: 'Baseboard initialization',
    0x16: 'Floppy initialization',
    0x17: 'Keyboard test',
    0x18: 'Pointing device test',
    0x19: 'Primary processor initialization',
}

firmware_errors = {
    0: 'Unspecified',
    1: 'No memory installed',
    2: 'All memory failed',
    3: 'Unrecoverable disk failure',
    4: 'Unrecoverable board failure',
    5: 'Unrecoverable diskette failure',
    6: 'Unrecoverable storage controller failure',
    7: 'Unrecoverable keyboard failure',  # Keyboard error, press
    # any key to continue..
    8: 'Removable boot media not found',
    9: 'Video adapter failure',
    0xa: 'No video device',
    0xb: 'Firmware corruption detected',
    0xc: 'CPU voltage mismatch',
    0xd: 'CPU speed mismatch',
}

auxlog_actions = {
    0: 'entry added',
    1: 'entry added (could not map to standard)',
    2: 'entry added with corresponding standard events',
    3: 'log cleared',
    4: 'log disabled',
    5: 'log enabled',
}

restart_causes = {
    0: 'Unknown',
    1: 'Remote request',
    2: 'Reset button',
    3: 'Power button',
    4: 'Watchdog',
    5: 'OEM',
    6: 'Power restored',
    7: 'Power restored',
    8: 'Reset due to event',
    9: 'Cycle due to event',
    0xa: 'OS reset',
    0xb: 'Timer wake',
}

slot_types = {
    0: 'PCI',
    1: 'Drive Array',
    2: 'External connector',
    3: 'Docking',
    4: 'Other',
    5: 'Entity ID',
    6: 'AdvancedTCA',
    7: 'Memory',
    8: 'Fan',
    9: 'PCIe',
    10: 'SCSI',
    11: 'SATA/SAS',
    12: 'USB',
}

# ACPI-style power-state names; not referenced by decode_eventdata itself,
# presumably used by other decoders in this module.
power_states = {
    0: 'S0',
    1: 'S1',
    2: 'S2',
    3: 'S3',
    4: 'S4',
    5: 'S5',
    6: 'S4 or S5',
    7: 'G3',
    8: 'S1, S2, or S3',
    9: 'G1',
    0xa: 'S5',
    0xb: 'on',
    0xc: 'off',
}

watchdog_boot_phases = {
    1: 'Firmware',
    2: 'Firmware',
    3: 'OS Load',
    4: 'OS',
    5: 'OEM',
}

version_changes = {
    1: 'Device ID',
    2: 'Management controller firmware',
    3: 'Management controller revision',
    4: 'Management conroller manufacturer',
    5: 'IPMI version',
    6: 'Management controller firmware',
    7: 'Management controller boot block',
    8: 'Management controller firmware',
    9: 'System Firmware (UEFI/BIOS)',
    0xa: 'SMBIOS',
    0xb: 'OS',
    0xc: 'OS Loader',
    0xd: 'Diagnostics',
    0xe: 'Management agent',
    0xf: 'Management application',
    0x10: 'Management middleware',
    0x11: 'FPGA',
    0x12: 'FRU',
    0x13: 'FRU',
    0x14: 'Equivalent FRU',
    0x15: 'Updated FRU',
    0x16: 'Older FRU',
    0x17: 'Hardware (switch/jumper)',
}

fru_states = {
    0: 'Normal',
    1: 'Externally requested',
    2: 'Latch',
    3: 'Hot swap',
    4: 'Internal action',
    5: 'Lost communication',
    6: 'Lost communication',
    7: 'Unexpected removal',
    8: 'Operator',
    9: 'Unable to compute IPMB address',
    0xa: 'Unexpected deactivation',
}


def decode_eventdata(sensor_type, offset, eventdata, event_consts, sdr):
    """Decode extra event data from an alert or log.

    Provide a textual summary of eventdata per descriptions in
    Table 42-3 of the specification.  This is for sensor specific
    offset events only.  Returns None when no extra decode applies.

    :param sensor_type: The sensor type number from the event
    :param offset: Sensor specific offset
    :param eventdata: The three bytes from the log or alert
    :param event_consts: event definition including severity.
    :param sdr: The sdr locator entry to help clarify how to parse data
    """
    if sensor_type == 5 and offset == 4:  # link loss, indicates which port
        return 'Port {0}'.format(eventdata[1])
    elif sensor_type == 8 and offset == 6:  # PSU cfg error
        errtype = eventdata[2] & 0b1111
        return psucfg_errors.get(errtype, 'Unknown')
    elif sensor_type == 0xC6:
        return 'PSU Redundancy'
    elif sensor_type == 0xc and offset == 8:  # Memory spare
        return 'Module {0}'.format(eventdata[2])
    elif sensor_type == 0xf:
        if offset == 0:  # firmware error
            return firmware_errors.get(eventdata[1], 'Unknown')
        elif offset in (1, 2):
            return firmware_progress.get(eventdata[1], 'Unknown')
    elif sensor_type == 0x10:
        if offset == 0:  # Correctable error logging on a specific memory part
            return 'Module {0}'.format(eventdata[1])
        elif offset == 1:
            return 'Reading type {0:02X}h, offset {1:02X}h'.format(
                eventdata[1], eventdata[2] & 0b1111)
        elif offset == 5:
            return '{0}%'.format(eventdata[2])
        elif offset == 6:
            return 'Processor {0}'.format(eventdata[1])
    elif sensor_type == 0x12:
        if offset == 3:
            action = (eventdata[1] & 0b1111000) >> 4
            return auxlog_actions.get(action, 'Unknown')
        elif offset == 4:
            # bitfield of system actions taken alongside the event
            sysactions = []
            if eventdata[1] & 0b1 << 5:
                sysactions.append('NMI')
            if eventdata[1] & 0b1 << 4:
                sysactions.append('OEM action')
            if eventdata[1] & 0b1 << 3:
                sysactions.append('Power Cycle')
            if eventdata[1] & 0b1 << 2:
                sysactions.append('Reset')
            if eventdata[1] & 0b1 << 1:
                sysactions.append('Power Down')
            if eventdata[1] & 0b1:
                sysactions.append('Alert')
            return ','.join(sysactions)
        elif offset == 5:  # Clock change event, either before or after
            if eventdata[1] & 0b10000000:
                return 'After'
            else:
                return 'Before'
    elif sensor_type == 0x19 and offset == 0:
        return 'Requested {0} while {1}'.format(eventdata[1], eventdata[2])
    elif sensor_type == 0x1d and offset == 7:
        return restart_causes.get(eventdata[1], 'Unknown')
    elif sensor_type == 0x21:
        return '{0} {1}'.format(slot_types.get(eventdata[1], 'Unknown'),
                                eventdata[2])
    elif sensor_type == 0x23:
        phase = eventdata[1] & 0b1111
        return watchdog_boot_phases.get(phase, 'Unknown')
    elif sensor_type == 0x28:
        if offset == 4:
            return 'Sensor {0}'.format(eventdata[1])
        elif offset == 5:
            islogical = (eventdata[1] & 0b10000000)
            if islogical:
                if eventdata[2] in sdr.fru:
                    return sdr.fru[eventdata[2]].fru_name
                else:
                    return 'FRU {0}'.format(eventdata[2])
    elif sensor_type == 0x2a and offset == 3:
        return 'User {0}'.format(eventdata[1])
    elif sensor_type == 0x2b:
        return version_changes.get(eventdata[1], 'Unknown')
    elif sensor_type == 0x2c:
        cause = (eventdata[1] & 0b11110000) >> 4
        cause = fru_states.get(cause, 'Unknown')
        oldstate = eventdata[1] & 0b1111
        if oldstate != offset:
            try:
                cause += '(change from {0})'.format(
                    event_consts.sensor_type_offsets[0x2c][oldstate]['desc'])
            except KeyError:
                pass
        # Bug fix: the original computed ``cause`` and then fell off the
        # end of the function, implicitly returning None and discarding
        # the decoded FRU state change.
        return cause
async def _fix_sel_time(records, ipmicmd):
    """Annotate SEL *records*, in place, with wall-clock 'timestamp' strings.

    SEL entries carry raw 'timecode' values that may be pre-init uptime
    stamps (< 0x20000000) or may jump when the BMC clock is set.  Walk
    the log newest-to-oldest, using paired 'Clock time change'
    Before/After events to accumulate a correction factor relative to
    the BMC's current time, and remove those pair events from the list.

    :param records: mutable sequence of decoded SEL record dicts
    :param ipmicmd: command object used to read the current SEL time
    """
    timefetched = False
    rsp = None
    while not timefetched:
        try:
            rsp = await ipmicmd.raw_command(netfn=0xa, command=0x48)
            timefetched = True
        except pygexc.IpmiException as pi:
            # 0x81 indicates the time read should simply be retried
            if pi.ipmicode == 0x81:
                continue
            raise
    # The specification declares an epoch and all that, but we really don't
    # care. We instead just focus on differences from the 'present'
    nowtime = struct.unpack_from('<I', rsp['data'])[0]
    correctednowtime = nowtime
    if nowtime < 0x20000000:
        # The BMC's own clock is still a pre-init uptime value
        correctearly = True
        inpreinit = True
    else:
        correctearly = False
        inpreinit = False
    newtimestamp = 0
    lasttimestamp = None
    trimindexes = []
    correctionenabled = True
    # Iterate newest-to-oldest so corrections propagate backward in time
    for index in reversed(range(len(records))):
        record = records[index]
        if 'timecode' not in record or record['timecode'] == 0xffffffff:
            continue
        if ('event' in record and record['event'] == 'Clock time change'
                and record['event_data'] == 'After'):
            if (lasttimestamp is not None
                    and record['timecode'] > lasttimestamp):
                # if the timestamp did something impossible, declare the rest
                # of history not meaningfully correctable
                correctionenabled = False
                newtimestamp = 0
                continue
            newtimestamp = record['timecode']
            trimindexes.append(index)
        elif ('event' in record and record['event'] == 'Clock time change'
                and record['event_data'] == 'Before'):
            if not correctionenabled:
                continue
            if newtimestamp:
                if record['timecode'] < 0x20000000:
                    correctearly = True
                    nowtime = correctednowtime
                # we want time that occurred before this point to get the delta
                # added to it to catch up
                correctednowtime += newtimestamp - record['timecode']
                newtimestamp = 0
                trimindexes.append(index)
        else:
            # clean up after potentially broken time sync pairs
            newtimestamp = 0
            if record['timecode'] < 0x20000000:  # uptime timestamp
                if not correctearly or not correctionenabled:
                    correctednowtime = nowtime
                    continue
                if (lasttimestamp is not None
                        and record['timecode'] > lasttimestamp):
                    # Time has gone backwards in pre-init, no hope for
                    # accurate time
                    correctearly = False
                    correctionenabled = False
                    correctednowtime = nowtime
                    continue
                inpreinit = True
                lasttimestamp = record['timecode']
                age = correctednowtime - record['timecode']
                record['timestamp'] = time.strftime(
                    '%Y-%m-%dT%H:%M:%S', time.localtime(time.time() - age))
            else:
                # We are in 'normal' time, assume we cannot go to
                # pre-init time and do corrections unless time sync events
                # guide us in safely
                if (lasttimestamp is not None
                        and record['timecode'] > lasttimestamp):
                    # Time has gone backwards, without a clock sync
                    # give up any attempt to correct from this point back...
                    correctionenabled = False
                if inpreinit:
                    inpreinit = False
                    # We were in pre-init, now in real time, reset the
                    # time correction factor to the last stored
                    # 'wall clock' correction
                    correctednowtime = nowtime
                correctearly = False
                lasttimestamp = record['timecode']
                if not correctionenabled or correctednowtime < 0x20000000:
                    # We can't correct time when the correction factor is
                    # rooted in a pre-init timestamp, just convert
                    record['timestamp'] = time.strftime(
                        '%Y-%m-%dT%H:%M:%S', time.localtime(
                            record['timecode']))
                else:
                    age = correctednowtime - record['timecode']
                    record['timestamp'] = time.strftime(
                        '%Y-%m-%dT%H:%M:%S', time.localtime(
                            time.time() - age))
    # trimindexes was collected while iterating high-to-low, so deleting
    # in that same order keeps remaining indexes valid
    for index in trimindexes:
        del records[index]
class EventHandler(object):
|
||||
"""IPMI Event Processor
|
||||
|
||||
This class provides facilities for processing alerts and event log
|
||||
data. This can be used to aid in pulling historical event data
|
||||
from a BMC or as part of a trap handler to translate the traps into
|
||||
manageable data.
|
||||
|
||||
:param sdr: An SDR object (per aiohmi.ipmi.sdr) matching the target BMC SDR
|
||||
:param ipmicmd: An ipmi command object to fetch data live
|
||||
"""
|
||||
@classmethod
async def create(cls, sdr, ipmicmd):
    """Asynchronously construct an EventHandler.

    A factory is used instead of __init__ because fetching the event
    constants from the command object requires an await.

    :param sdr: An SDR object (per aiohmi.ipmi.sdr) matching the target BMC
    :param ipmicmd: An ipmi command object to fetch data live
    :returns: a fully initialized EventHandler instance
    """
    self = cls()
    self._sdr = sdr
    self._ipmicmd = ipmicmd
    self.event_consts = await ipmicmd.get_event_constants()
    return self
def _populate_event(self, deassertion, event, event_data, event_type,
                    sensor_type, sensorid):
    """Fill the *event* dict in place from decoded IPMI event fields.

    Resolves the sensor name via the SDR, classifies the event through
    the event-constants tables (generic offsets for event types <= 0xc,
    sensor-specific offsets for 0x6f), and attaches any extra decoded
    event data.

    :param deassertion: True when the event is a deassertion
    :param event: dict to populate (mutated in place)
    :param event_data: the three event data bytes
    :param event_type: the event/reading type byte
    :param sensor_type: the sensor type byte
    :param sensorid: identifier used to look the sensor up in the SDR
    """
    event['component_id'] = sensorid
    try:
        event['component'] = self._sdr.sensors[sensorid].name
    except KeyError:
        # Sensor not in SDR; 0 conventionally means no sensor at all
        if sensorid == 0:
            event['component'] = None
        else:
            event['component'] = 'Sensor {0}'.format(sensorid)
    event['deassertion'] = deassertion
    event['event_data_bytes'] = event_data
    # Bits 7:6 / 5:4 of the first data byte declare how to interpret
    # event data bytes 2 and 3 respectively
    byte2type = (event_data[0] & 0b11000000) >> 6
    byte3type = (event_data[0] & 0b110000) >> 4
    if byte2type == 1:
        event['triggered_value'] = event_data[1]
    evtoffset = event_data[0] & 0b1111
    event['event_type_byte'] = event_type
    if event_type <= 0xc:
        event['component_type_id'] = sensor_type
        event['event_id'] = '{0}.{1}'.format(event_type, evtoffset)
        # use generic offset decode for event description
        event['component_type'] = self.event_consts.sensor_type_codes.get(
            sensor_type, '')
        evreading = self.event_consts.generic_type_offsets.get(
            event_type, {}).get(evtoffset, {})
        if event['deassertion']:
            event['event'] = evreading.get('deassertion_desc', '')
            event['severity'] = evreading.get(
                'deassertion_severity', pygconst.Health.Ok)
        else:
            event['event'] = evreading.get('desc', '')
            event['severity'] = evreading.get(
                'severity', pygconst.Health.Ok)
    elif event_type == 0x6f:
        # sensor-specific event/reading type
        event['component_type_id'] = sensor_type
        event['event_id'] = '{0}.{1}'.format(event_type, evtoffset)
        event['component_type'] = self.event_consts.sensor_type_codes.get(
            sensor_type, '')
        evreading = self.event_consts.sensor_type_offsets.get(
            sensor_type, {}).get(evtoffset, {})
        if event['deassertion']:
            event['event'] = evreading.get('deassertion_desc', '')
            event['severity'] = evreading.get(
                'deassertion_severity', pygconst.Health.Ok)
        else:
            event['event'] = evreading.get('desc', '')
            event['severity'] = evreading.get(
                'severity', pygconst.Health.Ok)
    if event_type == 1:  # threshold
        if byte3type == 1:
            event['threshold_value'] = event_data[2]
    if 3 in (byte2type, byte3type) or event_type == 0x6f:
        # sensor specific decode, see sdr module...
        # 2 - 0xc: generic discrete, 0x6f, sensor specific
        additionaldata = decode_eventdata(
            sensor_type, evtoffset, event_data, self.event_consts,
            self._sdr)
        if additionaldata:
            event['event_data'] = additionaldata
async def decode_pet(self, specifictrap, petdata):
|
||||
if isinstance(specifictrap, int):
|
||||
specifictrap = struct.unpack('4B', struct.pack('>I', specifictrap))
|
||||
if len(specifictrap) != 4:
|
||||
raise pygexc.InvalidParameterValue(
|
||||
'specifictrap should be integer number or 4 byte array')
|
||||
specifictrap = bytearray(specifictrap)
|
||||
sensor_type = specifictrap[1]
|
||||
event_type = specifictrap[2]
|
||||
# Event Offset is in first event data byte, so no need to fetch it here
|
||||
# evtoffset = specifictrap[3] & 0b1111
|
||||
deassertion = (specifictrap[3] & 0b10000000) == 0b10000000
|
||||
# alertseverity = petdata[26]
|
||||
sensorid = '{0}.0'.format(petdata[28])
|
||||
event_data = petdata[31:34]
|
||||
event = {}
|
||||
seqnum = struct.unpack_from('>H', buffer(petdata[16:18]))[0]
|
||||
ltimestamp = struct.unpack_from('>I', buffer(petdata[18:22]))[0]
|
||||
petack = bytearray(struct.pack('<HIBBBBBB', seqnum, ltimestamp,
|
||||
petdata[25], petdata[27], petdata[28],
|
||||
*event_data))
|
||||
try:
|
||||
await self._ipmicmd.raw_command(netfn=4, command=0x17, data=petack)
|
||||
except pygexc.IpmiException: # Ignore failure to ack for now
|
||||
pass
|
||||
self._populate_event(deassertion, event, event_data, event_type,
|
||||
sensor_type, sensorid)
|
||||
event['timecode'] = ltimestamp
|
||||
await _fix_sel_time((event,), self._ipmicmd)
|
||||
return event
|
||||
|
||||
def _decode_standard_event(self, eventdata, event):
|
||||
# Ignore the generator id for now..
|
||||
if eventdata[2] not in (3, 4):
|
||||
raise pygexc.PyghmiException(
|
||||
'Unrecognized Event message version {0}'.format(eventdata[2]))
|
||||
sensor_type = eventdata[3]
|
||||
sensorid = '{0}.{1}.{2}'.format(
|
||||
eventdata[0], eventdata[4], eventdata[1] & 0b11)
|
||||
event_data = eventdata[6:]
|
||||
deassertion = (eventdata[5] & 0b10000000 == 0b10000000)
|
||||
event_type = eventdata[5] & 0b1111111
|
||||
self._populate_event(deassertion, event, event_data, event_type,
|
||||
sensor_type, sensorid)
|
||||
|
||||
async def _sel_decode(self, origselentry):
|
||||
selentry = bytearray(origselentry)
|
||||
event = {}
|
||||
event['record_id'] = struct.unpack_from('<H', origselentry[:2])[0]
|
||||
if selentry[2] == 2 or (0xc0 <= selentry[2] <= 0xdf):
|
||||
# Either standard, or at least the timestamp is standard
|
||||
event['timecode'] = struct.unpack_from('<I', buffer(selentry[3:7])
|
||||
)[0]
|
||||
if selentry[2] == 2: # ipmi defined standard format
|
||||
self._decode_standard_event(selentry[7:], event)
|
||||
elif 0xc0 <= selentry[2] <= 0xdf:
|
||||
event['oemid'] = selentry[7:10]
|
||||
event['oemdata'] = selentry[10:]
|
||||
elif selentry[2] >= 0xe0:
|
||||
# In this class of OEM message, all bytes are OEM, interpretation
|
||||
# is wholly left up to the OEM layer, using the OEM ID of the BMC
|
||||
event['oemdata'] = selentry[3:]
|
||||
await self._ipmicmd._oem.process_event(event, self._ipmicmd, selentry)
|
||||
if 'event_type_byte' in event:
|
||||
del event['event_type_byte']
|
||||
if 'event_data_bytes' in event:
|
||||
del event['event_data_bytes']
|
||||
return event
|
||||
|
||||
async def _fetch_entries(self, ipmicmd, startat, targetlist, rsvid=0):
|
||||
curr = startat
|
||||
endat = curr
|
||||
while curr != 0xffff:
|
||||
endat = curr
|
||||
reqdata = bytearray(struct.pack('<HHH', rsvid, curr, 0xff00))
|
||||
try:
|
||||
rsp = await ipmicmd.raw_command(
|
||||
netfn=0xa, command=0x43, data=reqdata)
|
||||
except pygexc.IpmiException as pi:
|
||||
if pi.ipmicode == 203:
|
||||
break
|
||||
else:
|
||||
raise
|
||||
curr = struct.unpack_from('<H', buffer(rsp['data'][:2]))[0]
|
||||
targetlist.append(await self._sel_decode(rsp['data'][2:]))
|
||||
return endat
|
||||
|
||||
    async def fetch_sel(self, ipmicmd, clear=False):
        """Fetch SEL entries

        Return an iterable of SEL entries. If clearing is requested,
        the fetch and clear will be done as an atomic operation, assuring
        no entries are dropped.

        :param ipmicmd: The Command object to use to interrogate
        :param clear: Whether to clear the entries upon retrieval.
        """
        records = []
        # First we do a fetch all without reservation, reducing the risk
        # of having a long lived reservation that gets canceled in the middle
        endat = await self._fetch_entries(ipmicmd, 0, records)
        if clear and records:  # don't bother clearing if there were no records
            # To do clear, we make a reservation first...
            # (Reserve SEL, netfn 0xa command 0x42; the reservation is
            # invalidated if the SEL changes, making fetch+clear atomic)
            rsp = await ipmicmd.raw_command(netfn=0xa, command=0x42)
            rsvid = struct.unpack_from('<H', rsp['data'])[0]
            # Then we refetch the tail with reservation (check for change)
            del records[-1]  # remove the record that's about to be duplicated
            await self._fetch_entries(ipmicmd, endat, records, rsvid)
            # finally clear the SEL
            # 0XAA means start initiate, 0x524c43 is 'RCL' or 'CLR' backwards
            clrdata = bytearray(struct.pack('<HI', rsvid, 0xAA524C43))
            await ipmicmd.raw_command(netfn=0xa, command=0x47, data=clrdata)
        # Now to fixup the record timestamps... first we need to get the BMC
        # opinion of current time
        await _fix_sel_time(records, ipmicmd)
        for rec in records:
            yield rec
|
||||
352
confluent_server/aiohmi/ipmi/fru.py
Normal file
352
confluent_server/aiohmi/ipmi/fru.py
Normal file
@@ -0,0 +1,352 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""This module provides access to SDR offered by a BMC
|
||||
|
||||
This data is common between 'sensors' and 'inventory' modules since SDR
|
||||
is both used to enumerate sensors for sensor commands and FRU ids for FRU
|
||||
commands
|
||||
|
||||
For now, we will not offer persistent SDR caching as we do in xCAT's IPMI
|
||||
code. Will see if it is adequate to advocate for high object reuse in a
|
||||
persistent process for the moment.
|
||||
|
||||
Focus is at least initially on the aspects that make the most sense for a
|
||||
remote client to care about. For example, smbus information is being
|
||||
skipped for now
|
||||
|
||||
This file handles parsing of fru format records as presented by IPMI
|
||||
devices. This format is documented in the 'Platform Management FRU
|
||||
Information Storage Definition (Document Revision 1.2)
|
||||
"""
|
||||
|
||||
import struct
|
||||
import time
|
||||
import weakref
|
||||
|
||||
import aiohmi.exceptions as iexc
|
||||
import aiohmi.ipmi.private.spd as spd
|
||||
|
||||
|
||||
# FRU manufacture dates are minutes since this epoch, per the FRU spec
fruepoch = 820454400  # 1/1/1996, 0:00

# This is from SMBIOS specification Table 16
# Maps the FRU chassis-type byte to a human readable description
enclosure_types = {
    0: 'Unspecified',
    1: 'Other',
    2: 'Unknown',
    3: 'Desktop',
    4: 'Low Profile Desktop',
    5: 'Pizza Box',
    6: 'Mini Tower',
    7: 'Tower',
    8: 'Portable',
    9: 'Laptop',
    0xa: 'Notebook',
    0xb: 'Hand Held',
    0xc: 'Docking Station',
    0xd: 'All in One',
    0xe: 'Sub Notebook',
    0xf: 'Space-saving',
    0x10: 'Lunch Box',
    0x11: 'Main Server Chassis',
    0x12: 'Expansion Chassis',
    0x13: 'SubChassis',
    0x14: 'Bus Expansion Chassis',
    0x15: 'Peripheral Chassis',
    0x16: 'RAID Chassis',
    0x17: 'Rack Mount Chassis',
    0x18: 'Sealed-case PC',
    0x19: 'Multi-system Chassis',
    0x1a: 'Compact PCI',
    0x1b: 'Advanced TCA',
    0x1c: 'Blade',
    0x1d: 'Blade Enclosure',
}
|
||||
|
||||
|
||||
def unpack6bitascii(inputdata):
    """Decode IPMI FRU '6-bit ASCII' packed data into a string.

    Every 3 input bytes carry four 6-bit codepoints, each offset from
    0x20. This encoding scheme seems unique to IPMI FRU and is
    relatively rare in practice. The input bytearray is consumed
    (emptied) as a side effect.
    """
    decoded = []
    while inputdata:
        chunk = inputdata[:3]
        del inputdata[:3]
        # Unpack the four 6-bit values spread across the 3-byte chunk
        sixbit = (
            chunk[0] & 0x3f,
            ((chunk[0] >> 6) & 0x03) | ((chunk[1] & 0x0f) << 2),
            ((chunk[1] >> 4) & 0x0f) | ((chunk[2] & 0x03) << 4),
            (chunk[2] >> 2) & 0x3f,
        )
        decoded.extend(chr(0x20 + code) for code in sixbit)
    return ''.join(decoded)
|
||||
|
||||
|
||||
def decode_fru_date(datebytes):
    """Convert a 3-byte FRU manufacture date to an ISO 8601 string.

    The field is little-endian minutes since the FRU epoch (1/1/1996).
    A zero pad byte is appended to *datebytes* so it can be unpacked as
    a 32-bit value. Returns None for implausibly small values.
    """
    datebytes.append(0)
    minutes = struct.unpack('<I', struct.pack('4B', *datebytes))[0]
    # Some data in the field has had some data less than 800
    # At this juncture, it's far more likely for this noise
    # to be incorrect than anything in particular
    if minutes < 800:
        return None
    stamp = (minutes * 60) + fruepoch
    return time.strftime('%Y-%m-%dT%H:%M', time.gmtime(stamp))
|
||||
|
||||
|
||||
class FRU(object):
    """An object representing structure

    FRU (Field Replaceable Unit) is the usual format for inventory in IPMI
    devices. This covers most standards compliant inventory data
    as well as presenting less well defined fields in a structured way.

    :param rawdata: A binary string/bytearray of raw data from BMC or dump
    :param ipmicmd: An ipmi command object to fetch data live
    :param fruid: The identifier number of the FRU
    :param sdr: The sdr locator entry to help clarify how to parse data
    """

    def __init__(self, rawdata=None, ipmicmd=None, fruid=0, sdr=None):
        # rawfru: raw FRU bytes; databytes: working copy used by parsing;
        # info: decoded name/value fields populated by parsedata()
        self.rawfru = rawdata
        self.databytes = None
        self.info = None
        self.sdr = sdr
        self.ipmicmd = None
        self.fruid = fruid
        if self.rawfru is not None:
            # Offline mode: parse the supplied dump immediately
            self.parsedata()
        elif ipmicmd is not None:
            # Live mode: hold only a weak proxy so this object does not
            # keep the IPMI session alive; data is fetched by initialize()
            self.ipmicmd = weakref.proxy(ipmicmd)
        else:
            raise TypeError('Either rawdata or ipmicmd must be specified')

    async def initialize(self):
        """Fetch and parse FRU data when constructed in live mode."""
        if self.ipmicmd is not None:
            try:
                await self.fetch_fru(self.fruid)
            except iexc.IpmiException as ie:
                # Completion codes 195/201/203/129 are treated as an
                # absent/unreadable FRU rather than a hard failure
                if ie.ipmicode in (195, 201, 203, 129):
                    return
                raise
            self.parsedata()

    async def fetch_fru(self, fruid):
        """Read the raw FRU contents from the BMC into self.rawfru.

        Reads in chunks, shrinking the chunk size when the BMC rejects a
        request as too large (completion codes 201/202).

        :param fruid: The FRU device id to read
        :raises iexc.IpmiException: on unrecoverable completion codes
        """
        # Get FRU Inventory Area Info (netfn 0xa, cmd 0x10): size in the
        # first two response bytes, little endian
        response = await self.ipmicmd.raw_command(
            netfn=0xa, command=0x10, data=[fruid])
        if 'error' in response:
            raise iexc.IpmiException(response['error'], code=response['code'])
        frusize = response['data'][0] | (response['data'][1] << 8)
        # In our case, we don't need to think too hard about whether
        # the FRU is word or byte, we just process what we get back in the
        # payload
        chunksize = 224
        # Selected as it is accomodated by most tested things
        # and many tested things broke after going much
        # bigger
        if chunksize > frusize:
            chunksize = frusize
        offset = 0
        self.rawfru = bytearray([])
        while chunksize:
            # Read FRU Data (netfn 0xa, cmd 0x11)
            response = await self.ipmicmd.raw_command(
                netfn=0xa, command=0x11, data=[fruid, offset & 0xff,
                                               offset >> 8, chunksize])
            if response['code'] in (201, 202):
                # if it was too big, back off and try smaller
                # Try just over half to mitigate the chance of
                # one request becoming three rather than just two
                if chunksize == 3:
                    raise iexc.IpmiException(response['error'])
                chunksize //= 2
                chunksize += 2
                continue
            elif 'error' in response:
                raise iexc.IpmiException(response['error'], response['code'])
            offset += response['data'][0]
            if response['data'][0] == 0:
                break
            # move down to avoid exception when data[0] is zero
            self.rawfru.extend(response['data'][1:])
            if offset + chunksize > frusize:
                chunksize = frusize - offset

    def parsedata(self):
        """Parse self.rawfru into the self.info dictionary."""
        self.info = {}
        rawdata = self.rawfru
        self.databytes = bytearray(rawdata)
        if self.sdr is not None:
            # The FRU locator SDR describes what kind of device this is
            frutype = self.sdr.fru_type_and_modifier >> 8
            frusubtype = self.sdr.fru_type_and_modifier & 0xff
            if frutype > 0x10 or frutype < 0x8 or frusubtype not in (0, 1, 2):
                # Unrecognized device type: silently give up on parsing
                return
                # TODO(jjohnson2): strict mode to detect aiohmi and BMC
                # gaps
                # raise iexc.PyghmiException(
                #    'Unsupported FRU device: {0:x}h, {1:x}h'.format(frutype,
                #                                                    frusubtype
                #    ))
            elif frusubtype == 1:
                # Subtype 1 carries DIMM SPD data rather than the IPMI
                # FRU layout; delegate to the spd module
                self.myspd = spd.SPD(self.databytes)
                self.info = self.myspd.info
                return
        if self.databytes[0] != 1:
            # Only common header format version 1 is understood
            return
        # TODO(jjohnson2): strict mode to flag potential BMC errors
        # raise iexc.BmcErrorException("Invalid/Unsupported FRU format")
        # Ignore the internal use even if present.
        self._parse_chassis()
        self._parse_board()
        self._parse_prod()
        # TODO(jjohnson2): Multi Record area

    def _decode_tlv(self, offset, lang=0):
        """Decode one type/length prefixed field at *offset*.

        The leading byte packs the encoding type (high 2 bits) and the
        value length (low 6 bits).

        :returns: (value, offset of the next field); value is None for a
                  zero length field
        """
        currtlv = self.databytes[offset]
        currlen = currtlv & 0b111111
        currtype = (currtlv & 0b11000000) >> 6
        retinfo = self.databytes[offset + 1:offset + currlen + 1]
        newoffset = offset + currlen + 1
        if currlen == 0:
            return None, newoffset
        if currtype == 0:
            # return it as a bytearray, not much to be done for it
            return retinfo, newoffset
        elif currtype == 3:  # text string
            # Sometimes BMCs have FRU data with 0xff termination
            # contrary to spec, but can be tolerated
            # also in case something null terminates, handle that too
            # strictly speaking, \xff should be a y with diaeresis, but
            # erring on the side of that not being very relevant in practice
            # to fru info, particularly the last values
            # Additionally 0xfe has been observed, which should be a thorn, but
            # again assuming termination of string is more likely than thorn.
            retinfo = retinfo.rstrip(b'\xfe\xff\x10\x03\x00 ')
            retinfo = retinfo.replace(b'\x00', b'')
            if lang in (0, 25):
                # language codes 0/25: decode as 8-bit latin-1 text
                try:
                    retinfo = retinfo.decode('iso-8859-1')
                except (UnicodeError, LookupError):
                    pass
            else:
                # other language codes: decode as 16-bit unicode
                try:
                    retinfo = retinfo.decode('utf-16le')
                except (UnicodeDecodeError, LookupError):
                    pass
            # Some things lie about being text. Do the best we can by
            # removing trailing spaces and nulls like makes sense for text
            # and rely on vendors to workaround deviations in their OEM
            # module
            # retinfo = retinfo.rstrip(b'\x00 ')
            return retinfo, newoffset
        elif currtype == 1:  # BCD 'plus'
            retdata = ''
            for byte in retinfo:
                # nibbles a/b/c map to space, dash, and period respectively
                byte = hex(byte).replace('0x', '').replace('a', ' ').replace(
                    'b', '-').replace('c', '.')
                retdata += byte
            retdata = retdata.strip()
            return retdata, newoffset
        elif currtype == 2:  # 6-bit ascii
            retinfo = unpack6bitascii(retinfo).strip()
            return retinfo, newoffset

    def _parse_chassis(self):
        """Decode the chassis info area, if present, into self.info."""
        # Common header byte 2 holds the chassis area offset in 8-byte units
        offset = 8 * self.databytes[2]
        if offset == 0:
            return  # no chassis info area present
        if self.databytes[offset] & 0b1111 != 1:
            raise iexc.BmcErrorException("Invalid/Unsupported chassis area")
        inf = self.info
        # ignore length field, just process the data
        # add check to avoid exception
        if self.databytes[offset + 2] in enclosure_types.keys():
            inf['Chassis type'] = enclosure_types[self.databytes[offset + 2]]
        inf['Chassis part number'], offset = self._decode_tlv(offset + 3)
        inf['Chassis serial number'], offset = self._decode_tlv(offset)
        inf['chassis_extra'] = []
        self.extract_extra(inf['chassis_extra'], offset)

    def extract_extra(self, target, offset, language=0):
        """Append remaining TLV fields to *target* until end marker 0xc1."""
        try:
            while self.databytes[offset] != 0xc1:
                fielddata, offset = self._decode_tlv(offset, language)
                target.append(fielddata)
        except IndexError:
            # If we overrun the end due to malformed FRU,
            # return at least what decoded right
            return

    def _parse_board(self):
        """Decode the board info area, if present, into self.info."""
        # Common header byte 3 holds the board area offset in 8-byte units
        offset = 8 * self.databytes[3]
        if offset == 0:
            return  # no board info area present
        if self.databytes[offset] & 0b1111 != 1:
            raise iexc.BmcErrorException("Invalid/Unsupported board info area")
        inf = self.info
        language = self.databytes[offset + 2]
        inf['Board manufacture date'] = decode_fru_date(
            self.databytes[offset + 3:offset + 6])
        inf['Board manufacturer'], offset = self._decode_tlv(offset + 6)
        inf['Board product name'], offset = self._decode_tlv(offset, language)
        inf['Board serial number'], offset = self._decode_tlv(offset, language)
        inf['Board model'], offset = self._decode_tlv(offset, language)
        inf['Board FRU Id'], offset = self._decode_tlv(offset, language)
        inf['board_extra'] = []
        self.extract_extra(inf['board_extra'], offset, language)

    def _parse_prod(self):
        """Decode the product info area, if present, into self.info."""
        # Common header byte 4 holds the product area offset in 8-byte units
        offset = 8 * self.databytes[4]
        if offset == 0:
            return  # no product info area present
        inf = self.info
        language = self.databytes[offset + 2]
        inf['Manufacturer'], offset = self._decode_tlv(offset + 3,
                                                       language)
        inf['Product name'], offset = self._decode_tlv(offset, language)
        inf['Model'], offset = self._decode_tlv(offset, language)
        inf['Hardware Version'], offset = self._decode_tlv(offset, language)
        inf['Serial Number'], offset = self._decode_tlv(offset, language)
        inf['Asset Number'], offset = self._decode_tlv(offset, language)
        inf['FRU ID'], offset = self._decode_tlv(offset, language)
        inf['product_extra'] = []
        self.extract_extra(inf['product_extra'], offset, language)

    def __repr__(self):
        return repr(self.info)
        # retdata = 'Chassis data\n'
        # retdata += ' Type: ' + repr(self.chassis_type) + '\n'
        # retdata += ' Part Number: ' + repr(self.chassis_part_number) + '\n'
        # retdata += ' Serial Number: ' + repr(self.chassis_serial) + '\n'
        # retdata += ' Extra: ' + repr(self.chassis_extra) + '\n'
        # retdata += 'Board data\n'
        # retdata += ' Manufacturer: ' + repr(self.board_manufacturer) + '\n'
        # retdata += ' Date: ' + repr(self.board_mfg_date) + '\n'
        # retdata += ' Product' + repr(self.board_product) + '\n'
        # retdata += ' Serial: ' + repr(self.board_serial) + '\n'
        # retdata += ' Model: ' + repr(self.board_model) + '\n'
        # retdata += ' Extra: ' + repr(self.board_extra) + '\n'
        # retdata += 'Product data\n'
        # retdata += ' Manufacturer: ' + repr(self.product_manufacturer)+'\n'
        # retdata += ' Name: ' + repr(self.product_name) + '\n'
        # retdata += ' Model: ' + repr(self.product_model) + '\n'
        # retdata += ' Version: ' + repr(self.product_version) + '\n'
        # retdata += ' Serial: ' + repr(self.product_serial) + '\n'
        # retdata += ' Asset: ' + repr(self.product_asset) + '\n'
        # retdata += ' Extra: ' + repr(self.product_extra) + '\n'
        # return retdata
|
||||
0
confluent_server/aiohmi/ipmi/oem/__init__.py
Normal file
0
confluent_server/aiohmi/ipmi/oem/__init__.py
Normal file
550
confluent_server/aiohmi/ipmi/oem/generic.py
Normal file
550
confluent_server/aiohmi/ipmi/oem/generic.py
Normal file
@@ -0,0 +1,550 @@
|
||||
# Copyright 2015 Lenovo Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import aiohmi.exceptions as exc
|
||||
import aiohmi.ipmi.private.constants as event_const
|
||||
import aiohmi.ipmi.sdr as ipmisdr
|
||||
import struct
|
||||
|
||||
class OEMHandler(object):
|
||||
"""Handler class for OEM capabilities.
|
||||
|
||||
Any vendor wishing to implement OEM extensions should look at this
|
||||
base class for an appropriate interface. If one does not exist, this
|
||||
base class should be extended. At initialization an OEM is given
|
||||
a dictionary with product_id, device_id, manufacturer_id, and
|
||||
device_revision as keys in a dictionary, along with an ipmi Command object
|
||||
"""
|
||||
|
||||
    @classmethod
    async def create(cls, oemid, ipmicmd):
        """Construct a generic OEM handler.

        :param oemid: dict with product_id, device_id, manufacturer_id and
                      device_revision keys (unused by the generic handler)
        :param ipmicmd: the owning ipmi Command object (unused here)
        """
        self = cls()
        return self

    async def get_video_launchdata(self):
        # No generic remote-video launch data; OEM modules override this.
        return {}

    async def get_description(self):
        """Get a description of descriptive attributes of a node.

        Height describes, in U how tall the system is, and slot is 0 if
        not a blade type server, and slot if it is.

        :return: dictionary with 'height' and 'slot' members
        """
        return {}

    async def get_screenshot(self, outfile):
        # Screenshot capture is OEM specific; generic case returns nothing.
        return {}
|
||||
|
||||
async def get_system_power_watts(self, ipmicmd):
|
||||
# Use DCMI getpower reading command
|
||||
rsp = await ipmicmd.raw_command(netfn=0x2c, command=2, data=(0xdc, 1, 0, 0))
|
||||
wattage = struct.unpack('<H', rsp['data'][1:3])[0]
|
||||
return wattage
|
||||
|
||||
    async def get_ikvm_methods(self):
        # No standard enumeration of iKVM access methods exists.
        return []

    async def get_ikvm_launchdata(self):
        # no standard ikvm behavior, must be oem defined
        return {}
|
||||
|
||||
async def get_average_processor_temperature(self, ipmicmd):
|
||||
# DCMI suggests preferrence for 0x37 ('Air inlet')
|
||||
# If not that, then 0x40 ('Air inlet')
|
||||
# in practice, some implementations use 0x27 ('External environment')
|
||||
if not hasattr(self, '_processor_names'):
|
||||
self._processor_names = []
|
||||
readings = []
|
||||
if not self._processor_names:
|
||||
sdr = await ipmicmd.init_sdr()
|
||||
for sensename in sdr.sensors:
|
||||
sensor = sdr.sensors[sensename]
|
||||
if sensor.reading_type != 1:
|
||||
continue
|
||||
if not sensor.baseunit:
|
||||
continue
|
||||
if sensor.sensor_type != 'Temperature':
|
||||
continue
|
||||
if sensor.entity == 'Processor':
|
||||
self._processor_names.append(sensor.sensor_name)
|
||||
readingvalues = []
|
||||
for procsensor in self._processor_names:
|
||||
try:
|
||||
reading = await ipmicmd.get_sensor_reading(procsensor)
|
||||
except exc.IpmiException:
|
||||
continue
|
||||
if reading.value is not None:
|
||||
readingvalues.append(float(reading.value))
|
||||
tmplreading = ipmisdr.SensorReading({'name': 'Average Processor Temperature', 'type': 'Temperature'}, '°C')
|
||||
if readingvalues:
|
||||
tmplreading.value = sum(readingvalues) / len(readingvalues)
|
||||
else:
|
||||
tmplreading.value = None
|
||||
tmplreading.unavailable = 1
|
||||
return tmplreading
|
||||
|
||||
|
||||
    async def get_inlet_temperature(self, ipmicmd):
        """Return the reading of the system's inlet temperature sensor.

        Scans the SDR for a temperature sensor on an 'Air inlet' entity,
        falling back to 'External environment' (excluding anything named
        like an exhaust sensor); the discovered sensor name is cached on
        the instance for subsequent calls.

        :param ipmicmd: Command object used for SDR and sensor access
        :raises exc.UnsupportedFunctionality: if no inlet sensor is found
        """
        # DCMI suggests preferrence for 0x37 ('Air inlet')
        # If not that, then 0x40 ('Air inlet')
        # in practice, some implementations use 0x27 ('External environment')
        if not hasattr(self, '_inlet_name'):
            self._inlet_name = None
        if self._inlet_name:
            # Cached from a previous scan
            return await ipmicmd.get_sensor_reading(self._inlet_name)
        sdr = await ipmicmd.init_sdr()
        extenv = []
        airinlets = []
        for sensename in sdr.sensors:
            sensor = sdr.sensors[sensename]
            if sensor.reading_type != 1:
                continue
            if not sensor.baseunit:
                continue
            if sensor.sensor_type != 'Temperature':
                continue
            if sensor.entity == 'External environment':
                if 'exhaust' in sensor.sensor_name.lower():
                    continue
                extenv.append(sensor.sensor_name)
            if sensor.entity == 'Air inlet':
                airinlets.append(sensor.sensor_name)
        # Prefer a true 'Air inlet' sensor over 'External environment'
        if airinlets:
            if len(airinlets) > 1:
                raise Exception('TODO: how to deal with multiple inlets')
            self._inlet_name = airinlets[0]
        elif extenv:
            if len(extenv) > 1:
                raise Exception('TODO: how to deal with multiple external environments')
            self._inlet_name = extenv[0]
        if not self._inlet_name:
            raise exc.UnsupportedFunctionality(
                'Unable to detect inlet sensor name for this platform')
        return await ipmicmd.get_sensor_reading(self._inlet_name)
|
||||
|
||||
async def process_event(self, event, ipmicmd, seldata):
|
||||
"""Modify an event according with OEM understanding.
|
||||
|
||||
Given an event, allow an OEM module to augment it. For example,
|
||||
event data fields can have OEM bytes. Other times an OEM may wish
|
||||
to apply some transform to some field to suit their conventions.
|
||||
"""
|
||||
event['oem_handler'] = None
|
||||
evdata = event['event_data_bytes']
|
||||
if evdata[0] & 0b11000000 == 0b10000000:
|
||||
event['oem_byte2'] = evdata[1]
|
||||
if evdata[0] & 0b110000 == 0b100000:
|
||||
event['oem_byte3'] = evdata[2]
|
||||
|
||||
    async def clear_system_configuration(self):
        # Resetting UEFI/BIOS settings requires OEM-specific support.
        raise exc.UnsupportedFunctionality(
            'Clearing system configuration not implemented for this platform')

    async def clear_bmc_configuration(self):
        # Resetting BMC settings to factory requires OEM-specific support.
        raise exc.UnsupportedFunctionality(
            'Clearing BMC configuration not implemented for this platform')
|
||||
|
||||
    async def get_oem_inventory_descriptions(self):
        """Get descriptions of available additional inventory items

        OEM implementation may provide additional records not indicated
        by FRU locator SDR records. An implementation is expected to
        implement this function to list component names that would map to
        OEM behavior beyond the specification. It should return an iterable
        of names
        """
        # Unreachable yield marks this as an (empty) async generator.
        if False:
            yield None

    async def get_sensor_reading(self, sensorname):
        """Get an OEM sensor

        If software wants to model some OEM behavior as a 'sensor' without
        doing SDR, this hook provides that ability. It should mimic
        the behavior of 'get_sensor_reading' in command.py.
        """
        # The generic base knows no OEM sensors.
        raise Exception('Sensor not found: ' + sensorname)

    async def get_sensor_descriptions(self):
        """Get list of OEM sensor names and types

        Iterate over dicts describing a label and type for OEM 'sensors'. This
        should mimic the behavior of the get_sensor_descriptions function
        in command.py.
        """
        # Unreachable yield marks this as an (empty) async generator.
        if False:
            yield None
|
||||
|
||||
    async def get_diagnostic_data(self, savefile, progress=None):
        """Download diagnostic data about target to a file

        This should be a payload that the vendor's support team can use
        to do diagnostics.
        :param savefile: File object or filename to save to
        :param progress: Callback to be informed about progress
        :raises exc.UnsupportedFunctionality: always, in the generic base
        """
        raise exc.UnsupportedFunctionality(
            'Do not know how to get diagnostic data for this platform')

    async def get_sensor_data(self):
        """Get OEM sensor data

        Iterate through all OEM 'sensors' and return data as if they were
        normal sensors. This should mimic the behavior of the get_sensor_data
        function in command.py.
        """
        # Unreachable yield marks this as an (empty) async generator.
        if False:
            yield None
|
||||
|
||||
    async def get_oem_inventory(self):
        """Get tuples of component names and inventory data.

        This returns an iterable of tuples. The first member of each tuple
        is a string description of the inventory item. The second member
        is a dict of inventory information about the component.
        """
        # Delegates enumeration and per-component detail to the OEM hooks,
        # so subclasses only need to override those.
        async for desc in self.get_oem_inventory_descriptions():
            yield (desc, await self.get_inventory_of_component(desc))
|
||||
|
||||
    async def get_inventory_of_component(self, component):
        """Get inventory detail of an OEM defined component

        Given a string that may be an OEM component, return the detail of that
        component. If the component does not exist, returns None
        """
        return None

    async def get_leds(self):
        """Get tuples of LED categories.

        Each category contains a category name and a dictionary of LED names
        with their status as values.
        """
        # Unreachable yield marks this as an (empty) async generator.
        if False:
            yield None

    async def get_ntp_enabled(self):
        """Get whether ntp is enabled or not

        :returns: True if enabled, False if disabled, None if unsupported
        """
        return None

    async def set_ntp_enabled(self, enabled):
        """Set whether NTP should be enabled

        :returns: True on success
        """
        return None

    async def get_ntp_servers(self):
        """Get current set of configured NTP servers

        :returns iterable of configured NTP servers:
        """
        return ()

    async def set_ntp_server(self, server, index=0):
        """Set an ntp server

        :param server: Destination address of server to reach
        :param index: Index of server to configure, primary assumed if not
                      specified
        :returns: True if success
        """
        return None
|
||||
|
||||
async def process_fru(self, fru, name=None):
|
||||
"""Modify a fru entry with OEM understanding.
|
||||
|
||||
Given a fru, clarify 'extra' fields according to OEM rules and
|
||||
return the transformed data structure. If OEM processes, it is
|
||||
expected that it sets 'oem_parser' to the name of the module. For
|
||||
clients passing through data, it is suggested to pass through
|
||||
board/product/chassis_extra_data arrays if 'oem_parser' is None,
|
||||
and mask those fields if not None. It is expected that OEMs leave
|
||||
the fields intact so that if client code hard codes around the
|
||||
ordered lists that their expectations are not broken by an update.
|
||||
"""
|
||||
# In the generic case, just pass through
|
||||
if fru is None:
|
||||
return fru
|
||||
fru['oem_parser'] = None
|
||||
return fru
|
||||
|
||||
async def get_oem_firmware(self, bmcver, components, category):
|
||||
"""Get Firmware information."""
|
||||
|
||||
# Here the bmc version is passed into the OEM handler, to allow
|
||||
# the handler to enrich the data. For the generic case, just
|
||||
# provide the generic BMC version, which is all that is possible
|
||||
# Additionally, components may be provided for an advisory guide
|
||||
# on interesting firmware. The OEM library is permitted to return
|
||||
# more than requested, and it is the responsibility of the calling
|
||||
# code to know whether it cares or not. The main purpose of the
|
||||
# components argument is to indicate when certain performance
|
||||
# optimizations can be performed.
|
||||
yield 'BMC Version', {'version': bmcver}
|
||||
|
||||
    async def get_oem_capping_enabled(self):
        """Get PSU based power capping status

        :return: True if enabled and False if disabled
        """
        # Generic base: no OEM capping support known.
        return ()

    async def set_oem_capping_enabled(self, enable):
        """Set PSU based power capping

        :param enable: True for enable and False for disable
        """
        return ()

    async def get_oem_remote_kvm_available(self):
        """Get remote KVM availability"""
        return False

    async def get_oem_domain_name(self):
        """Get Domain name"""
        return ()

    async def set_oem_domain_name(self, name):
        """Set Domain name

        :param name: domain name to be set
        """
        return ()
|
||||
|
||||
    async def clear_storage_arrays(self):
        """Delete all storage arrays; unsupported in the generic case."""
        raise exc.UnsupportedFunctionality(
            'Remote storage configuration not supported on this platform')

    async def remove_storage_configuration(self, cfgspec):
        """Remove the storage elements described by cfgspec; unsupported here."""
        raise exc.UnsupportedFunctionality(
            'Remote storage configuration not supported on this platform')

    async def apply_storage_configuration(self, cfgspec):
        """Apply the storage configuration described by cfgspec; unsupported here."""
        raise exc.UnsupportedFunctionality(
            'Remote storage configuration not supported on this platform')

    async def check_storage_configuration(self, cfgspec):
        """Validate cfgspec without applying it; unsupported here."""
        raise exc.UnsupportedFunctionality(
            'Remote storage configuration not supported on this platform')

    async def get_storage_configuration(self):
        """Retrieve current storage configuration; unsupported here."""
        raise exc.UnsupportedFunctionality(
            'Remote storage configuration not supported on this platform')
|
||||
|
||||
    async def get_update_status(self):
        """Report progress of an in-flight firmware update; unsupported here."""
        raise exc.UnsupportedFunctionality(
            'Firmware update not supported on this platform')

    async def update_firmware(self, filename, data=None, progress=None, bank=None):
        """Apply a firmware image to the platform; unsupported here.

        :param filename: name of the firmware file
        :param data: optional file-like object supplying the image
        :param progress: optional callback receiving progress updates
        :param bank: optional target firmware bank
        """
        raise exc.UnsupportedFunctionality(
            'Firmware update not supported on this platform')

    async def reseat_bay(self, bay):
        """Power-cycle a bay as if physically reseated; unsupported here."""
        raise exc.UnsupportedFunctionality(
            'Reseat not supported on this platform')
|
||||
|
||||
    async def get_graphical_console(self):
        """Get graphical console launcher; generic case has none."""
        return ()
|
||||
|
||||
    async def add_extra_net_configuration(self, netdata, channel=None):
        """Add additional network configuration data

        Given a standard netdata struct, add details as relevant from
        OEM commands, modifying the passed dictionary in place.  The
        generic case contributes nothing.

        :param netdata: Dictionary to store additional network data
        :param channel: optional channel number to scope the query
        """
        return
|
||||
|
||||
    async def get_oem_identifier(self):
        """Get host name via OEM means; generic case has none."""
        return None

    async def set_oem_identifier(self, name):
        """Set host name via OEM means.

        :param name: host name to be set
        :return: False to indicate the generic case handled nothing
        """
        return False
|
||||
|
||||
    async def detach_remote_media(self):
        """Detach any attached remote media; unsupported in the generic case."""
        raise exc.UnsupportedFunctionality()

    async def attach_remote_media(self, imagename, username, password):
        """Attach a remote media image; unsupported in the generic case."""
        raise exc.UnsupportedFunctionality()

    async def upload_media(self, filename, progress, data):
        """Upload a media image to the BMC; unsupported in the generic case."""
        raise exc.UnsupportedFunctionality(
            'Remote media upload not supported on this system')

    async def list_media(self):
        """List media attached to the BMC; unsupported in the generic case."""
        # the unreachable yield makes this an async generator, so callers
        # can uniformly 'async for' over any implementation
        if False:
            yield None
        raise exc.UnsupportedFunctionality()
|
||||
|
||||
    async def set_identify(self, on, duration, blink):
        """Provide an OEM override for set_identify

        Some systems may require an override for set identify.  The
        generic case has no override and raises, signalling the caller
        to use the standard mechanism.

        :param on: whether the identify indicator should be lit
        :param duration: how long to light the indicator
        :param blink: whether the indicator should blink
        """
        raise exc.UnsupportedFunctionality()
|
||||
|
||||
    async def get_health(self, summary):
        """Provide an alternative or augmented health assessment

        An OEM handler can preprocess the summary and extend it with OEM
        specific data, and then return to let generic processing occur.
        It can also raise the aiohmi exception BypassGenericBehavior to
        suppress the standards based routine, for enhanced performance.

        :param summary: The health summary as prepared by the generic function
        :return: Nothing, modifies the summary object
        """
        # NOTE(review): generic case returns an empty list rather than
        # None; callers appear to only rely on the summary mutation.
        return []
|
||||
|
||||
    async def set_hostname(self, hostname):
        """OEM specific hook to set name information; unsupported here."""
        raise exc.UnsupportedFunctionality()

    async def get_hostname(self):
        """OEM specific hook to retrieve name information; unsupported here."""
        raise exc.UnsupportedFunctionality()
|
||||
|
||||
async def set_user_access(self, uid, channel, callback, link_auth, ipmi_msg,
|
||||
privilege_level):
|
||||
if privilege_level.startswith('custom.'):
|
||||
raise exc.UnsupportedFunctionality()
|
||||
return # Nothing to do
|
||||
|
||||
    async def set_alert_ipv6_destination(self, ip, destination, channel):
        """Set an IPv6 alert destination

        If and only if an implementation does not support standard
        IPv6 but has an OEM implementation, override this to process
        the data.

        :param ip: IPv6 address to set
        :param destination: Destination number
        :param channel: Channel number to apply

        :returns: True if standard parameter set should be suppressed
        """
        # generic case does nothing; the standard parameter set proceeds
        return False
|
||||
|
||||
    async def get_extended_bmc_configuration(self):
        """Get extended bmc configuration

        In the case of potentially redundant/slow
        attributes, retrieve unpopular options that may be
        redundant or confusing and slow.  Generic case has none.
        """
        return {}

    async def get_bmc_configuration(self):
        """Get additional BMC parameters

        This allows a bmc to return arbitrary key-value pairs.
        Generic case has none.
        """
        return {}

    async def set_bmc_configuration(self, changeset):
        """Apply arbitrary key-value BMC settings; unsupported here."""
        raise exc.UnsupportedFunctionality(
            'Platform does not support setting bmc attributes')
|
||||
|
||||
    async def get_system_configuration(self, hideadvanced):
        """Retrieve system configuration

        This returns a dictionary of settings names to dictionaries including
        'current', 'default' and 'possible' values as well as 'help'.
        Generic case exposes no settings.

        :param hideadvanced: Whether to hide 'advanced' settings that most
                             users should not need. Defaults to True.
        """
        return {}

    async def set_system_configuration(self, changeset):
        """Apply a changeset to system configuration

        Takes a key value pair and applies it against the system
        configuration; unsupported in the generic case.
        """
        raise exc.UnsupportedFunctionality()
|
||||
|
||||
    async def get_licenses(self):
        """Enumerate installed licenses; unsupported in the generic case."""
        raise exc.UnsupportedFunctionality()
        # unreachable yield marks this as an async generator for callers
        yield None

    async def delete_license(self, name):
        """Remove an installed license by name; unsupported here."""
        raise exc.UnsupportedFunctionality()

    async def save_licenses(self, directory):
        """Save installed licenses into directory; unsupported here."""
        raise exc.UnsupportedFunctionality()
        # unreachable yield marks this as an async generator for callers
        yield None

    async def apply_license(self, filename, progress=None, data=None):
        """Install a license from filename or data; unsupported here."""
        raise exc.UnsupportedFunctionality()
        # unreachable yield marks this as an async generator for callers
        yield None
|
||||
|
||||
    async def get_user_expiration(self, uid):
        """Get password expiration for a user; unknown in the generic case."""
        return None

    async def get_user_privilege_level(self, uid):
        """Get OEM privilege level for a user; unknown in the generic case."""
        return None
|
||||
|
||||
    # NOTE(review): method name misspells 'privileges'; kept as-is since
    # callers depend on the existing name.
    async def set_oem_extended_privilleges(self, uid):
        """Set user extended privilege as 'KVM & VMedia Allowed'

        |KVM & VMedia Not Allowed   0x00 0x00 0x00 0x00
        |KVM Only Allowed           0x01 0x00 0x00 0x00
        |VMedia Only Allowed        0x02 0x00 0x00 0x00
        |KVM & VMedia Allowed       0x03 0x00 0x00 0x00

        :param uid: User ID.
        :return: False in the generic case (nothing applied)
        """
        return False
|
||||
|
||||
    async def process_zero_fru(self, zerofru):
        """Apply the standard FRU post-processing to the chassis (zero) FRU."""
        return await self.process_fru(zerofru)
|
||||
|
||||
async def is_valid(self, name):
|
||||
return name is not None
|
||||
|
||||
    async def process_password(self, password, data):
        """OEM hook to transform password payloads; generic case passes through."""
        return data
|
||||
|
||||
    async def set_server_capping(self, value):
        """Set power capping for server

        No-op in the generic case; OEM handlers override this.

        :param value: power capping value to set.
        """
        pass

    async def get_server_capping(self):
        """Get power capping for server

        :return: power capping value, or None when not supported.
        """
        return None
|
||||
|
||||
    async def get_oem_event_const(self):
        """Return the event-constant module used to decode OEM sensor events."""
        # event_const is the module-level generic table; OEM handlers
        # override to return their own constants module
        return event_const
|
||||
0
confluent_server/aiohmi/ipmi/oem/lenovo/__init__.py
Executable file
0
confluent_server/aiohmi/ipmi/oem/lenovo/__init__.py
Executable file
637
confluent_server/aiohmi/ipmi/oem/lenovo/config.py
Normal file
637
confluent_server/aiohmi/ipmi/oem/lenovo/config.py
Normal file
@@ -0,0 +1,637 @@
|
||||
# Copyright 2017-2019 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""from Matthew Garret's 'firmware_config' project.
|
||||
|
||||
This contains functions to manage the firmware configuration of Lenovo servers
|
||||
"""
|
||||
|
||||
import ast
|
||||
import asyncio
|
||||
import base64
|
||||
import random
|
||||
import struct
|
||||
|
||||
import time
|
||||
|
||||
import aiohmi.exceptions as pygexc
|
||||
|
||||
try:
|
||||
import EfiCompressor
|
||||
from lxml import etree
|
||||
except ImportError:
|
||||
etree = None
|
||||
EfiCompressor = None
|
||||
|
||||
IMM_NETFN = 0x2e
|
||||
IMM_COMMAND = 0x90
|
||||
LENOVO_ENTERPRISE = [0x4d, 0x4f, 0x00]
|
||||
|
||||
OPEN_RO_COMMAND = [0x01, 0x05, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40]
|
||||
OPEN_WO_COMMAND = [0x01, 0x03, 0x01]
|
||||
READ_COMMAND = [0x02]
|
||||
WRITE_COMMAND = [0x03]
|
||||
CLOSE_COMMAND = [0x05]
|
||||
SIZE_COMMAND = [0x06]
|
||||
|
||||
class Unsupported(Exception):
    """Raised when the target BMC cannot service the requested operation."""
    pass
|
||||
|
||||
def fromstring(inputdata):
    """Parse XML bytes defensively.

    Entity declarations are rejected outright (guards against entity
    expansion attacks).  If the document fails to parse, control
    characters other than tab/CR/LF are squashed to '?' and parsing is
    retried once.
    """
    if b'!entity' in inputdata.lower():
        raise Exception('Unsupported XML')
    try:
        return etree.fromstring(inputdata)
    except etree.XMLSyntaxError:
        # sanitize: re-encode with invalid bytes escaped, then blank out
        # stray control characters that XML cannot carry
        cleaned = bytearray(
            inputdata.decode('utf8', errors='backslashreplace').encode())
        for pos, byte in enumerate(cleaned):
            if byte < 0x20 and byte not in (9, 0xa, 0xd):
                cleaned[pos] = 63  # ASCII '?'
        return etree.fromstring(bytes(cleaned))
|
||||
|
||||
|
||||
|
||||
async def run_command_with_retry(connection, data):
    """Issue the IMM OEM raw command, retrying while the BMC is busy.

    Retries up to 240 times while the BMC answers with completion code
    0xa (node busy); any other IPMI error, or exhaustion of retries,
    propagates to the caller.

    :param connection: IPMI command session providing raw_command
    :param data: payload bytes for the IMM OEM file-transfer command
    :return: the raw_command response dictionary
    """
    tries = 240
    while tries:
        tries -= 1
        try:
            return await connection.raw_command(
                netfn=IMM_NETFN, command=IMM_COMMAND, data=data)
        except pygexc.IpmiException as e:
            if e.ipmicode != 0xa or not tries:
                raise
            # pause() is a coroutine on aiohmi sessions (it is awaited
            # elsewhere in this module); without await it never actually
            # slept between retries
            await connection.ipmi_session.pause(1)
|
||||
|
||||
|
||||
def _convert_syntax(raw):
|
||||
return raw.replace('!', 'not').replace('||', 'or').replace(
|
||||
'&&', 'and').replace('-', '_')
|
||||
|
||||
|
||||
class _ExpEngine(object):
    """Evaluator for converted suppress-if/gray-if firmware expressions.

    Walks a parsed Python AST produced from a _convert_syntax()-translated
    expression, resolving bare names and dotted names against the current
    option dictionary.  Every option consulted during evaluation is
    recorded in ``relatedsettings`` so callers can report *why* a setting
    is hidden or read-only.
    """

    def __init__(self, cfg, setting):
        # cfg: the full options dictionary being built by get_fw_options
        # setting: the option whose conditional is being evaluated; its
        #   lenovo_id provides the default category for unqualified names
        self.cfg = cfg
        self.setting = setting
        self.relatedsettings = set([])

    def lookup(self, category, setting):
        """Find the lenovo_value of the option matching (category, setting).

        Categories are compared after '-' -> '_' normalization (matching
        _convert_syntax), and any matched option is remembered in
        relatedsettings.  Returns None when no option matches.
        """
        for optkey in self.cfg:
            opt = self.cfg[optkey]
            lid = opt['lenovo_id'].replace('-', '_')
            if (lid == category
                    and opt['lenovo_setting'] == setting):
                self.relatedsettings.add(optkey)
                return opt['lenovo_value']
        return None

    def process(self, parsed):
        """Recursively evaluate an AST node; returns None for unknown shapes."""
        # unary 'not'
        if isinstance(parsed, ast.UnaryOp) and isinstance(parsed.op, ast.Not):
            return not self.process(parsed.operand)
        # equality / inequality comparisons (only the first operator pair
        # of a chained comparison is considered)
        if isinstance(parsed, ast.Compare):
            if isinstance(parsed.ops[0], ast.NotEq):
                return self.process(parsed.left) != self.process(
                    parsed.comparators[0])
            elif isinstance(parsed.ops[0], ast.Eq):
                return self.process(parsed.left) == self.process(
                    parsed.comparators[0])
        # numeric literal
        if isinstance(parsed, ast.Num):
            return parsed.n
        # 'Category.Setting' dotted reference
        if isinstance(parsed, ast.Attribute):
            category = parsed.value.id
            setting = parsed.attr
            return self.lookup(category, setting)
        # bare name: boolean literal, or a setting in the current category
        if isinstance(parsed, ast.Name):
            if parsed.id == 'true':
                return True
            elif parsed.id == 'false':
                return False
            else:
                category = self.setting['lenovo_id']
                setting = parsed.id
                return self.lookup(category, setting)
        # binary and/or (only the first two operands are considered)
        if isinstance(parsed, ast.BoolOp):
            if isinstance(parsed.op, ast.Or):
                return self.process(parsed.values[0]) or self.process(
                    parsed.values[1])
            elif isinstance(parsed.op, ast.And):
                return self.process(parsed.values[0]) and self.process(
                    parsed.values[1])
|
||||
|
||||
|
||||
def _eval_conditional(expression, cfg, setting):
    """Evaluate a converted suppress-if / gray-if expression.

    :param expression: Python-syntax expression (from _convert_syntax),
        possibly empty/None
    :param cfg: the options dictionary the expression may reference
    :param setting: the option being evaluated (supplies default category)
    :return: (result, related) where related is the set of option keys
        consulted; (False, ()) for empty or unparseable expressions
    """
    if not expression:
        return False, ()
    try:
        tree = ast.parse(expression).body[0].value
        engine = _ExpEngine(cfg, setting)
        verdict = engine.process(tree)
        return verdict, engine.relatedsettings
    except SyntaxError:
        return False, ()
|
||||
|
||||
|
||||
class LenovoFirmwareConfig(object):
    """Reader/writer for Lenovo IMM/XCC firmware configuration files."""

    def __init__(self, xc, useipmi=True):
        """Bind to a management connection.

        :param xc: management connection object (provides ipmicmd and
            redfish access)
        :param useipmi: when True, use the IPMI file-transfer path;
            otherwise operate purely over redfish
        """
        if not etree:
            raise Exception("python-lxml and python-eficompressor required "
                            "for this function")
        self.connection = xc.ipmicmd if useipmi else None
        self.xc = xc
|
||||
|
||||
    async def imm_size(self, filename):
        """Query the size in bytes of a file on the IMM filesystem.

        :param filename: remote file name (str or bytes)
        :return: size in bytes as reported by the BMC
        """
        data = bytearray()
        data.extend(LENOVO_ENTERPRISE)
        data.extend(SIZE_COMMAND)
        if not isinstance(filename, bytes):
            filename = filename.encode('utf-8')
        data.extend(filename)

        response = await run_command_with_retry(self.connection, data=data)

        # 4-byte size follows the 3-byte enterprise prefix in the reply
        size = response['data'][3:7]

        size = struct.unpack("i", size)
        return size[0]
|
||||
|
||||
async def imm_open(self, filename, write=False, size=None):
|
||||
response = None
|
||||
retries = 12
|
||||
data = bytearray()
|
||||
data.extend(LENOVO_ENTERPRISE)
|
||||
if write is False:
|
||||
data.extend(OPEN_RO_COMMAND)
|
||||
else:
|
||||
assert size is not None
|
||||
data.extend(OPEN_WO_COMMAND)
|
||||
hex_size = struct.pack("<I", size)
|
||||
data.extend(bytearray(hex_size[:4]))
|
||||
data.extend([0x01, 0x40])
|
||||
if not isinstance(filename, bytes):
|
||||
filename = filename.encode('utf-8')
|
||||
data.extend(filename)
|
||||
while len(data) < 38:
|
||||
data.append(0)
|
||||
|
||||
while retries:
|
||||
retries = retries - 1
|
||||
response = await run_command_with_retry(self.connection, data=data)
|
||||
try:
|
||||
if response['code'] == 0 or retries == 0:
|
||||
break
|
||||
except KeyError:
|
||||
pass
|
||||
self.connection.ipmi_session.pause(5)
|
||||
filehandle = response['data'][3:7]
|
||||
filehandle = struct.unpack("<I", filehandle)[0]
|
||||
return filehandle
|
||||
|
||||
    async def imm_close(self, filehandle):
        """Close a previously opened IMM file handle.

        Completion code 203 (handle already gone) is tolerated; any other
        IPMI error propagates.

        :param filehandle: handle returned by imm_open
        """
        data = []
        data += LENOVO_ENTERPRISE
        data += CLOSE_COMMAND

        hex_filehandle = struct.pack("<I", filehandle)
        data.extend(bytearray(hex_filehandle[:4]))
        try:
            await run_command_with_retry(self.connection, data=data)
        except pygexc.IpmiException as e:
            if e.ipmicode != 203:
                raise
|
||||
|
||||
    async def imm_write(self, filehandle, size, inputdata):
        """Write *size* bytes of *inputdata* to an open IMM file.

        Data is sent in 0xc8-byte chunks, each tagged with its absolute
        offset into the file.

        :param filehandle: handle returned by imm_open(write=True)
        :param size: total number of bytes to transfer
        :param inputdata: the bytes to write
        """
        blocksize = 0xc8
        offset = 0
        remaining = size

        hex_filehandle = struct.pack("<I", filehandle)

        while remaining > 0:
            data = bytearray()
            data.extend(LENOVO_ENTERPRISE)
            data.extend(WRITE_COMMAND)
            data.extend(hex_filehandle[:4])
            hex_offset = struct.pack("<I", offset)
            data.extend(hex_offset[:4])
            if remaining < blocksize:
                amount = remaining
            else:
                amount = blocksize
            data.extend(inputdata[offset:offset + amount])
            remaining -= blocksize
            offset += blocksize
            await run_command_with_retry(self.connection, data=data)
            # yield to the event loop between chunks
            await self.connection.ipmi_session.pause(0)
|
||||
|
||||
    async def imm_read(self, filehandle, size):
        """Read *size* bytes from an open IMM file.

        Data is fetched in 0xc8-byte chunks by absolute offset; the final
        chunk request is shrunk to the remaining length.

        :param filehandle: handle returned by imm_open
        :param size: total number of bytes to read
        :return: the bytes read
        """
        blocksize = 0xc8
        offset = 0
        output = b''
        remaining = size

        hex_filehandle = struct.pack("<I", filehandle)
        hex_blocksize = struct.pack("<H", blocksize)

        while remaining > 0:
            data = []
            data += LENOVO_ENTERPRISE
            data += READ_COMMAND
            data.extend(bytearray(hex_filehandle[:4]))
            hex_offset = struct.pack("<I", offset)
            data.extend(bytearray(hex_offset[:4]))
            if remaining < blocksize:
                hex_blocksize = struct.pack("<H", remaining)
            data.extend(hex_blocksize[:2])
            remaining -= blocksize
            offset += blocksize
            response = await run_command_with_retry(self.connection, data=data)
            # payload begins after the 3-byte enterprise prefix plus the
            # 2-byte length in the reply
            output += response['data'][5:]
            # yield to the event loop between chunks
            await self.connection.ipmi_session.pause(0)
        return output
|
||||
|
||||
async def factory_reset(self):
|
||||
options = await self.get_fw_options()
|
||||
for option in options:
|
||||
if options[option]['is_list']:
|
||||
options[option]['new_value'] = [options[option]['default']]
|
||||
else:
|
||||
options[option]['new_value'] = options[option]['default']
|
||||
await self.set_fw_options(options)
|
||||
|
||||
    async def get_fw_options(self, fetchimm=True):
        """Fetch and parse the firmware configuration from the BMC.

        Retrieves the EFI-compressed configuration XML (over redfish
        DSReadFile when no IPMI connection is bound, otherwise over the
        IMM file-transfer commands), decompresses it, and flattens every
        <setting> into a dictionary keyed by "<config label>.<mriName>".
        Each value records current/default/possible values plus the
        lenovo_* bookkeeping needed by set_fw_options, and hide/readonly
        conditionals are evaluated across the whole option set.

        :param fetchimm: select "config.efi" (True) vs "config" (False)
            as the remote file name
        :return: dict of option name -> option description dict
        """
        if fetchimm:
            cfgfilename = "config.efi"
        else:
            cfgfilename = "config"
        options = {}
        data = None
        # Probe redfish only when no IPMI connection is available;
        # a bound IPMI connection short-circuits to the IMM path below.
        if self.connection:
            rsp = ({}, 200)
        else:
            rsp = await self.xc.grab_redfish_response_with_status(
                '/redfish/v1/Managers/1')
        if rsp[1] == 200:
            if 'purley' not in rsp[0].get('Oem', {}).get('Lenovo', {}).get(
                    'release_name', 'unknown'):
                rsp = await self.xc.grab_redfish_response_with_status(
                    '/redfish/v1/Systems/1/Actions/Oem/LenovoComputerSystem.DSReadFile',
                    {'Action': 'DSReadFile', 'FileName': cfgfilename})
            else:
                rsp = (None, 500)
        if rsp[1] == 200:
            # redfish path: poll until a non-empty decompressed payload arrives
            for _ in range(0, 30):
                data = rsp[0]['Content']
                data = base64.b64decode(data)
                data = EfiCompressor.FrameworkDecompress(data, len(data))
                if len(data) != 0:
                    break
                if self.connection:
                    await self.connection.ipmi_session.pause(2)
                else:
                    await asyncio.sleep(2)
                rsp = await self.xc.grab_redfish_response_with_status(
                    '/redfish/v1/Systems/1/Actions/Oem/LenovoComputerSystem.DSReadFile',
                    {'Action': 'DSReadFile', 'FileName': cfgfilename})
        else:
            if self.connection is None:
                raise Unsupported('Not Supported')
            # IPMI path: open/size/read/close, retrying while payload empty
            for _ in range(0, 30):
                filehandle = await self.imm_open(cfgfilename)
                size = await self.imm_size(cfgfilename)
                data = await self.imm_read(filehandle, size)
                await self.imm_close(filehandle)
                data = EfiCompressor.FrameworkDecompress(data, len(data))
                if len(data) != 0:
                    break
                await self.connection.ipmi_session.pause(2)
        if not data:
            raise Exception("BMC failed to return configuration information")
        xml = fromstring(data)
        sortid = 0
        for config in xml.iter("config"):
            lenovo_id = config.get("ID")
            if lenovo_id == 'iSCSI':
                # Do not support iSCSI at this time
                continue
            cfglabel = config.find('mriName')
            cfglabel = lenovo_id if cfglabel is None else cfglabel.text
            # SYSTEM_PROD_DATA settings hang directly off <config>,
            # everything else is nested under <group> elements
            if lenovo_id == 'SYSTEM_PROD_DATA':
                theiter = [config]
            else:
                theiter = config.iter('group')
            for group in theiter:
                if lenovo_id == 'SYSTEM_PROD_DATA':
                    lenovo_group = None
                else:
                    lenovo_group = group.get("ID")
                for setting in group.iter("setting"):
                    forceinstance = False
                    is_list = False
                    lenovo_setting = setting.get("ID")
                    protect = True if setting.get("protected") == 'true' \
                        else False
                    hide = setting.get('suppress-if')
                    if hide:
                        hide = _convert_syntax(hide)
                    readonly = setting.get('gray-if')
                    if readonly:
                        readonly = _convert_syntax(readonly)
                    else:
                        access = setting.get('access')
                        if access == 'readonly':
                            readonly = 'true'
                    possible = []
                    current = None
                    currentidxes = []
                    default = None
                    reset = False
                    name = setting.find("mriName").text
                    help = setting.find("desc").text
                    validexpression = None
                    # text_data / numeric_data settings carry free-form values
                    onedata = setting.find('text_data')
                    if onedata is not None:
                        if onedata.get('password') == 'true':
                            protect = True
                    enumdata = setting.find('enumerate_data')
                    if enumdata is not None:
                        if enumdata.get('maxinstance') is not None:
                            forceinstance = True
                    if onedata is None:
                        onedata = setting.find('numeric_data')
                    if onedata is not None:
                        if onedata.get('maxinstance') is not None:
                            forceinstance = True
                        validexpression = onedata.get('pattern', None)
                        instances = list(onedata.iter('instance'))
                        if not instances:
                            protect = True  # not supported yet
                        else:
                            # instance IDs may be explicit or positional
                            instbynum = {}
                            defidx = 1
                            for x in instances:
                                xid = int(x.get('ID', defidx))
                                instbynum[xid] = x
                                defidx += 1
                            current = [instbynum[idx].text for idx in sorted(instbynum)]
                            currentidxes = list(sorted(instbynum))
                        default = onedata.get('default', None)
                        if default == '':
                            default = None
                    if (setting.find('cmd_data') is not None
                            or setting.find('boolean_data') is not None):
                        protect = True  # Hide currently unsupported settings
                    ldata = setting.find("list_data")
                    extraorder = False
                    currentdict = {}
                    currentdef = {}
                    if ldata is not None:
                        is_list = True
                        current = []
                        extraorder = ldata.get('ordered') == 'true'
                    lenovo_value = None
                    instancetochoicemap = {}
                    for choice in setting.iter("choice"):
                        label = choice.find("label").text
                        possible.append(label)
                        for instance in choice.iter("instance"):
                            if is_list:
                                if not extraorder:
                                    current.append(label)
                                else:
                                    currentdict[
                                        int(instance.get("order"))] = label
                            else:
                                currid = instance.get('ID')
                                if currid:
                                    instancetochoicemap[currid] = label
                                else:
                                    current = label
                                try:
                                    lenovo_value = int(
                                        choice.find('value').text)
                                except ValueError:
                                    lenovo_value = choice.find('value').text
                        hasdefault = choice.get('default')
                        if hasdefault == "true":
                            default = label
                        elif hasdefault is not None:
                            try:
                                a = int(hasdefault)
                                currentdef[a] = label
                            except ValueError:
                                pass
                        if choice.get("reset-required") == "true":
                            reset = True
                    if len(currentdict) > 0:
                        for order in sorted(currentdict):
                            current.append(currentdict[order])
                    if len(currentdef) > 0:
                        default = []
                        for order in sorted(currentdef):
                            default.append(currentdef[order])
                    optionname = "%s.%s" % (cfglabel, name)
                    alias = "%s.%s" % (lenovo_id, name)
                    if onedata is not None:
                        # multi-instance text/numeric data: emit one option
                        # per instance, suffixed with the instance index
                        if current and len(current) > 1:
                            instidx = 1
                            for inst in current:
                                if currentidxes:
                                    instidx = currentidxes.pop(0)
                                optname = '{0}.{1}'.format(optionname, instidx)
                                options[optname] = dict(
                                    current=inst,
                                    default=default,
                                    possible=possible,
                                    pending=None,
                                    new_value=None,
                                    help=help,
                                    is_list=is_list,
                                    lenovo_value=lenovo_value,
                                    lenovo_id=lenovo_id,
                                    lenovo_group=lenovo_group,
                                    lenovo_setting=lenovo_setting,
                                    lenovo_reboot=reset,
                                    lenovo_protect=protect,
                                    lenovo_instance=instidx,
                                    readonly_expression=readonly,
                                    hide_expression=hide,
                                    sortid=sortid,
                                    validexpression=validexpression,
                                    alias=alias)
                                sortid += 1
                                instidx += 1
                            continue
                        if current:
                            current = current[0]
                    if instancetochoicemap:
                        # multi-instance enumerations: one option per instance
                        for currid in sorted(instancetochoicemap):
                            optname = '{0}.{1}'.format(optionname, currid)
                            current = instancetochoicemap[currid]
                            options[optname] = dict(
                                current=current,
                                default=default,
                                possible=possible,
                                pending=None,
                                new_value=None,
                                help=help,
                                is_list=is_list,
                                lenovo_value=lenovo_value,
                                lenovo_id=lenovo_id,
                                lenovo_group=lenovo_group,
                                lenovo_setting=lenovo_setting,
                                lenovo_reboot=reset,
                                lenovo_protect=protect,
                                lenovo_instance=currid,
                                readonly_expression=readonly,
                                hide_expression=hide,
                                sortid=sortid,
                                validexpression=validexpression,
                                alias=alias)
                            sortid += 1
                        continue
                    lenovoinstance = ""
                    if forceinstance:
                        optionname = '{0}.{1}'.format(optionname, 1)
                        lenovoinstance = 1
                    options[optionname] = dict(current=current,
                                               default=default,
                                               possible=possible,
                                               pending=None,
                                               new_value=None,
                                               help=help,
                                               is_list=is_list,
                                               lenovo_value=lenovo_value,
                                               lenovo_id=lenovo_id,
                                               lenovo_group=lenovo_group,
                                               lenovo_setting=lenovo_setting,
                                               lenovo_reboot=reset,
                                               lenovo_protect=protect,
                                               lenovo_instance=lenovoinstance,
                                               readonly_expression=readonly,
                                               hide_expression=hide,
                                               sortid=sortid,
                                               validexpression=validexpression,
                                               alias=alias)
                    sortid = sortid + 1
        # second pass: conditionals can reference any other option, so they
        # are only evaluated once the full option set exists
        for opt in options:
            opt = options[opt]
            opt['hidden'], opt['hidden_why'] = _eval_conditional(
                opt['hide_expression'], options, opt)
            opt['readonly'], opt['readonly_why'] = _eval_conditional(
                opt['readonly_expression'], options, opt)

        return options
|
||||
|
||||
    async def set_fw_options(self, options, checkonly=False):
        """Push pending firmware-setting changes back to the BMC.

        Builds an ASU-style <configurations> XML document from every
        option whose 'new_value' is set, EFI-compresses it, and uploads
        it as "asu_update.efi" (redfish DSWriteFile when available,
        otherwise the IMM IPMI file-transfer path).

        :param options: option dict as produced by get_fw_options, with
            'new_value' populated on entries to change
        :param checkonly: when True, only report whether changes exist
        :return: True when changes were (or would be) applied, False when
            there is nothing to do
        :raises pygexc.InvalidParameterValue: when a read-only option has
            a new_value set
        """
        changes = False
        random.seed()
        # unique request identifier in the format the ASU tooling uses
        ident = 'ASU-%x-%x-%x-0' % (random.getrandbits(48),
                                    random.getrandbits(32),
                                    random.getrandbits(64))

        configurations = etree.Element('configurations', ID=ident,
                                       type='update', update='ASU Client')

        for option in options.keys():
            if options[option]['new_value'] is None:
                continue
            if options[option]['readonly']:
                errstr = '{0} is read only'.format(option)
                if options[option]['readonly_why']:
                    ea = ' due to one of the following settings: {0}'.format(
                        ','.join(sorted(options[option]['readonly_why'])))
                    errstr += ea
                raise pygexc.InvalidParameterValue(errstr)
            # skip values that are already current or already queued
            if options[option]['current'] == options[option]['new_value']:
                continue
            if options[option]['pending'] == options[option]['new_value']:
                continue
            if isinstance(options[option]['new_value'], str):
                # Coerce a simple string parameter to the expected list format
                options[option]['new_value'] = [options[option]['new_value']]
            options[option]['pending'] = options[option]['new_value']

            is_list = options[option]['is_list']
            count = 0
            changes = True
            config = etree.Element('config', ID=options[option]['lenovo_id'])
            configurations.append(config)
            setting = etree.Element('setting',
                                    ID=options[option]['lenovo_setting'])
            if options[option]['lenovo_group'] is not None:
                group = etree.Element('group',
                                      ID=options[option]['lenovo_group'])
                config.append(group)
                group.append(setting)
            else:
                config.append(setting)
            if is_list:
                container = etree.Element('list_data')
                setting.append(container)
            else:
                container = etree.Element('enumerate_data')
                setting.append(container)

            for value in options[option]['new_value']:
                choice = etree.Element('choice')
                container.append(choice)
                label = etree.Element('label')
                label.text = value
                choice.append(label)
                if is_list:
                    # ordered lists carry an explicit 1-based order attribute
                    count += 1
                    instance = etree.Element(
                        'instance', ID=str(options[option]['lenovo_instance']),
                        order=str(count))
                else:
                    instance = etree.Element(
                        'instance', ID=str(options[option]['lenovo_instance']))
                choice.append(instance)

        if not changes:
            return False
        if checkonly:
            return True

        xml = etree.tostring(configurations)
        data = EfiCompressor.FrameworkCompress(xml, len(xml))
        bdata = base64.b64encode(data).decode('utf8')
        # prefer the redfish DSWriteFile path on non-purley firmware
        rsp = await self.xc.grab_redfish_response_with_status(
            '/redfish/v1/Managers/1')
        if rsp[1] == 200:
            if 'purley' not in rsp[0].get('Oem', {}).get('Lenovo', {}).get(
                    'release_name', 'purley'):
                rsp = await self.xc.grab_redfish_response_with_status(
                    '/redfish/v1/Systems/1/Actions/Oem/'
                    'LenovoComputerSystem.DSWriteFile',
                    {'Action': 'DSWriteFile', 'Resize': len(data),
                     'FileName': 'asu_update.efi', 'Content': bdata})
                if rsp[1] == 204:
                    return True
        if self.connection is None:
            raise Unsupported('Not Supported')
        # IPMI fallback: write the compressed blob over the IMM transfer
        filehandle = await self.imm_open("asu_update.efi", write=True,
                                         size=len(data))
        await self.imm_write(filehandle, len(data), data)
        # short readback nudges the BMC to process the uploaded file
        stubread = len(data)
        if stubread > 8:
            stubread = 8
        await self.imm_read(filehandle, stubread)
        await self.imm_close(filehandle)
        return True
|
||||
53
confluent_server/aiohmi/ipmi/oem/lenovo/cpu.py
Executable file
53
confluent_server/aiohmi/ipmi/oem/lenovo/cpu.py
Executable file
@@ -0,0 +1,53 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from aiohmi.ipmi.oem.lenovo import inventory
|
||||
|
||||
# Layout of one CPU entry in the OEM inventory response; order and struct
# format strings must match the firmware's packed record exactly.
cpu_fields = (
    inventory.EntryField("index", "B"),
    inventory.EntryField("Cores", "B"),
    inventory.EntryField("Threads", "B"),
    inventory.EntryField("Manufacturer", "13s"),
    inventory.EntryField("Family", "30s"),
    inventory.EntryField("Model", "30s"),
    inventory.EntryField("Stepping", "3s"),
    inventory.EntryField("Maximum Frequency", "<I",
                         valuefunc=lambda v: str(v) + " MHz"),
    inventory.EntryField("Reserved", "h", include=False))

# Per-vendor raw IPMI request used to fetch the CPU inventory category.
cpu_cmd = {
    "lenovo": {
        "netfn": 0x06,
        "command": 0x59,
        "data": (0x00, 0xc1, 0x01, 0x00)},
    "asrock": {
        "netfn": 0x3a,
        "command": 0x50,
        "data": (0x01, 0x01, 0x00)},
    }
|
||||
|
||||
|
||||
def parse_cpu_info(raw):
    """Decode one raw CPU inventory entry using the cpu_fields layout."""
    return inventory.parse_inventory_category_entry(raw, cpu_fields)
|
||||
|
||||
|
||||
def get_categories():
    """Expose the CPU inventory category contributed by this module."""
    cpu_category = {
        "idstr": "CPU {0}",
        "parser": parse_cpu_info,
        "command": cpu_cmd,
        # AMI-based BMCs require the inventory workaround
        "workaround_bmc_bug": lambda bmctype: bmctype == "ami",
    }
    return {"cpu": cpu_category}
|
||||
59
confluent_server/aiohmi/ipmi/oem/lenovo/dimm.py
Executable file
59
confluent_server/aiohmi/ipmi/oem/lenovo/dimm.py
Executable file
@@ -0,0 +1,59 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from aiohmi.ipmi.oem.lenovo import inventory
|
||||
|
||||
|
||||
# Field layout of one DIMM inventory entry as returned by the BMC.
dimm_fields = (
    inventory.EntryField("index", "B"),
    inventory.EntryField("manufacture_location", "B"),
    inventory.EntryField("channel_number", "B"),
    inventory.EntryField("module_type", "10s"),
    inventory.EntryField("ddr_voltage", "10s"),
    inventory.EntryField("speed", "<h",
                         valuefunc=lambda v: str(v) + " MHz"),
    # Raw value is scaled up by 1024 for the reported capacity_mb;
    # presumably the BMC reports GB -- TODO confirm against firmware docs.
    inventory.EntryField("capacity_mb", "<h",
                         valuefunc=lambda v: v * 1024),
    inventory.EntryField("manufacturer", "30s"),
    # Serial is reported as a big-endian integer, rendered as bare hex.
    inventory.EntryField("serial", ">I",
                         valuefunc=lambda v: hex(v)[2:]),
    inventory.EntryField("model", "21s"),
    inventory.EntryField("reserved", "h", include=False)
)


# Per-vendor raw IPMI request used to fetch the DIMM inventory category.
dimm_cmd = {
    "lenovo": {
        "netfn": 0x06,
        "command": 0x59,
        "data": (0x00, 0xc1, 0x02, 0x00)},
    "asrock": {
        "netfn": 0x3a,
        "command": 0x50,
        "data": (0x01, 0x02, 0x01)},
}
|
||||
|
||||
|
||||
def parse_dimm_info(raw):
    """Decode one DIMM entry from *raw* using ``dimm_fields``."""
    return inventory.parse_inventory_category_entry(raw, dimm_fields)


def get_categories():
    """Describe the "dimm" inventory category for the category registry."""
    dimm_category = {
        "idstr": "DIMM {0}",
        "parser": parse_dimm_info,
        "command": dimm_cmd,
        # The BMC firmware workaround is applied unconditionally for DIMMs.
        "workaround_bmc_bug": lambda t: True,
    }
    return {"dimm": dimm_category}
|
||||
69
confluent_server/aiohmi/ipmi/oem/lenovo/drive.py
Executable file
69
confluent_server/aiohmi/ipmi/oem/lenovo/drive.py
Executable file
@@ -0,0 +1,69 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from aiohmi.ipmi.oem.lenovo import inventory
|
||||
|
||||
# Field layout of one physical drive inventory entry as returned by the BMC.
drive_fields = (
    inventory.EntryField("index", "B"),
    inventory.EntryField("VendorID", "64s"),
    inventory.EntryField("Size", "I",
                         valuefunc=lambda v: str(v) + " MB"),
    inventory.EntryField("MediaType", "B", mapper={
        0x00: "HDD",
        0x01: "SSD"
    }),
    inventory.EntryField("InterfaceType", "B", mapper={
        0x00: "Unknown",
        0x01: "ParallelSCSI",
        0x02: "SAS",
        0x03: "SATA",
        0x04: "FC"
    }),
    inventory.EntryField("FormFactor", "B", mapper={
        0x00: "Unknown",
        0x01: "2.5in",
        0x02: "3.5in"
    }),
    inventory.EntryField("LinkSpeed", "B", mapper={
        0x00: "Unknown",
        0x01: "1.5 Gb/s",
        0x02: "3.0 Gb/s",
        0x03: "6.0 Gb/s",
        0x04: "12.0 Gb/s"
    }),
    inventory.EntryField("SlotNumber", "B"),
    inventory.EntryField("ControllerIndex", "B"),
    inventory.EntryField("DeviceState", "B", mapper={
        0x00: "active",
        0x01: "stopped",
        0xff: "transitioning"
    }))
|
||||
|
||||
|
||||
def parse_drive_info(raw):
    """Decode one drive entry from *raw* using ``drive_fields``."""
    return inventory.parse_inventory_category_entry(raw, drive_fields)


def get_categories():
    """Describe the "drive" inventory category for the category registry."""
    drive_category = {
        "idstr": "Drive {0}",
        "parser": parse_drive_info,
        "command": {
            "netfn": 0x06,
            "command": 0x59,
            "data": (0x00, 0xc1, 0x04, 0x00),
        },
    }
    return {"drive": drive_category}
|
||||
166
confluent_server/aiohmi/ipmi/oem/lenovo/energy.py
Normal file
166
confluent_server/aiohmi/ipmi/oem/lenovo/energy.py
Normal file
@@ -0,0 +1,166 @@
|
||||
# Copyright 2017 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import struct
|
||||
|
||||
import aiohmi.constants as const
|
||||
import aiohmi.exceptions as pygexc
|
||||
import aiohmi.ipmi.sdr as sdr
|
||||
|
||||
|
||||
class EnergyManager(object):
    """Access Lenovo/IBM energy metering via OEM IPMI commands.

    Instances are produced by the :meth:`create` coroutine, which probes
    the BMC to discover which metering command set it speaks.
    """

    @classmethod
    async def create(cls, ipmicmd):
        """Probe the BMC behind *ipmicmd* and return a ready instance.

        There are two IANA prefixes possible for the command set; start
        with the Lenovo one, then fall back to IBM.  We start with a
        'find firmware instance' to test the water and get the handle
        (which has always been the same, but just in case).

        :raises pygexc.UnsupportedFunctionality: if the energy control
            firmware version is not recognized.
        """
        self = cls()
        self.iana = bytearray(b'\x66\x4a\x00')
        self._usefapm = False
        self._mypowermeters = ()
        try:
            # Probe for the FAPM-style power metering command first.
            rsp = await ipmicmd.raw_command(netfn=0x3a, command=0x32,
                                            data=[4, 2, 0, 0, 0])
            if len(rsp['data']) >= 8:
                self.supportedmeters = ('DC Energy', 'GPU Power',
                                        'Node Power', 'Total Power')
                self._mypowermeters = ('node power', 'total power',
                                       'gpu power', 'riser 1 power',
                                       'riser 2 power')
                self._usefapm = True
                # Bug fix: this previously executed a bare ``return``, so
                # awaiting create() on FAPM-capable systems yielded None.
                return self
        except pygexc.IpmiException:
            pass

        try:
            rsp = await ipmicmd.raw_command(netfn=0x2e, command=0x82,
                                            data=self.iana + b'\x00\x00\x01')
        except pygexc.IpmiException as ie:
            if ie.ipmicode == 193:  # try again with IBM IANA
                self.iana = bytearray(b'\x4d\x4f\x00')
                rsp = await ipmicmd.raw_command(
                    netfn=0x2e, command=0x82,
                    data=self.iana + b'\x00\x00\x01')
            else:
                raise
        if rsp['data'][4:6] not in (b'\x02\x01', b'\x02\x06', b'\x02\x09'):
            raise pygexc.UnsupportedFunctionality(
                "Energy Control {0}.{1} not recognized".format(rsp['data'][4],
                                                               rsp['data'][5]))
        self.modhandle = bytearray(rsp['data'][6:7])
        if await self.get_ac_energy(ipmicmd):
            self.supportedmeters = ('AC Energy', 'DC Energy')
        else:
            self.supportedmeters = ('DC Energy',)
        return self

    def supports(self, name):
        """Return True if *name* is one of this system's power meters."""
        if name.lower() in self._mypowermeters:
            return True
        return False

    async def get_sensor(self, name, ipmicmd):
        """Read the named FAPM power meter, returning (value, 'W').

        Retries up to three times when the BMC answers busy (0xc3).

        :raises pygexc.UnsupportedFunctionality: if *name* is not a
            known power meter or no response could be obtained.
        """
        if name.lower() not in self._mypowermeters:
            # Typo fix: message previously read 'Unrecogcized sensor'.
            raise pygexc.UnsupportedFunctionality('Unrecognized sensor')
        tries = 3
        rsp = None
        while tries:
            tries -= 1
            try:
                rsp = await ipmicmd.raw_command(netfn=0x3a, command=0x32,
                                                data=[4, 8, 0, 0, 0])
                break
            except pygexc.IpmiException as ie:
                if tries and ie.ipmicode == 0xc3:
                    # NOTE(review): pause() is not awaited here; confirm it
                    # is a synchronous delay on this session implementation.
                    ipmicmd.ipmi_session.pause(0.1)
                    continue
                raise
        if rsp is None:
            raise pygexc.UnsupportedFunctionality('Unrecognized sensor')
        npow, gpupow, r1pow, r2pow = struct.unpack('<HHHH', rsp['data'][6:14])
        if name.lower().startswith('node'):
            return npow, 'W'
        elif name.lower().startswith('gpu'):
            return gpupow, 'W'
        elif name.lower().startswith('total'):
            return npow + gpupow, 'W'

    async def get_fapm_energy(self, ipmicmd):
        """Return cumulative DC energy in kWh via the FAPM command."""
        rsp = await ipmicmd.raw_command(netfn=0x3a, command=0x32,
                                        data=[4, 2, 0, 0, 0])
        # Payload carries joules plus a millijoule remainder; fold to mJ.
        j, mj = struct.unpack('<IH', rsp['data'][2:8])
        mj = mj + (j * 1000)
        return float(mj / 1000000 / 3600)

    async def get_energy_precision(self, ipmicmd):
        # Debug helper: dumps the raw precision response payload.
        rsp = await ipmicmd.raw_command(
            netfn=0x2e, command=0x81,
            data=self.iana + self.modhandle + b'\x01\x80')
        print(repr(rsp['data'][:]))

    async def get_ac_energy(self, ipmicmd):
        """Return cumulative AC energy in kWh, or 0.0 if unsupported."""
        try:
            rsp = await ipmicmd.raw_command(
                netfn=0x2e, command=0x81,
                data=self.iana + self.modhandle + b'\x01\x82\x01\x08')
            # data is in millijoules, convert to the more recognizable kWh
            return float(
                struct.unpack('!Q', rsp['data'][3:])[0]) / 1000000 / 3600
        except pygexc.IpmiException as ie:
            if ie.ipmicode == 0xcb:
                return 0.0
            raise

    async def get_dc_energy(self, ipmicmd):
        """Return cumulative DC energy in kWh."""
        if self._usefapm:
            return await self.get_fapm_energy(ipmicmd)
        rsp = await ipmicmd.raw_command(
            netfn=0x2e, command=0x81,
            data=self.iana + self.modhandle + b'\x01\x82\x00\x08')
        # data is in millijoules, convert to the more recognizable kWh
        return float(struct.unpack('!Q', rsp['data'][3:])[0]) / 1000000 / 3600
|
||||
|
||||
|
||||
class Energy(object):
    """Expose a synthetic CPU-utilization sensor via bridged OEM IPMI."""

    def __init__(self, ipmicmd):
        self.ipmicmd = ipmicmd

    async def get_energy_sensor(self):
        """Yield a SensorReading describing current CPU usage (percent)."""
        try:
            rsp = await self.ipmicmd.raw_command(
                netfn=0x04,
                command=0x2d,
                bridge_request={"addr": 0x2c, "channel": 0x06},
                data=[0xbe])
        except pygexc.IpmiException:
            return
        reading = bytearray(rsp["data"])
        # Raw byte scales 0..0xff; convert to a 0..100 percentage.
        usage_pct = reading[0] * 100 / 0xff
        # Shape the result like the standard power sensor readings.
        sensordata = {
            'name': "CPU_Usage",
            'health': const.Health.Ok,
            'states': [],
            'state_ids': [],
            'type': "Processor",
            'units': "%",
            'value': usage_pct,
            'imprecision': None,
        }
        yield (sdr.SensorReading(sensordata, sensordata['units']))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Ad-hoc manual test: read DC energy from the BMC named on argv.
    # Bug fix: the old code called EnergyManager(c) (the factory is the
    # async classmethod create()) and never awaited get_dc_energy().
    import asyncio
    import os
    import sys

    import aiohmi.ipmi.command as cmd

    async def _main():
        # NOTE(review): assumes cmd.Command construction is synchronous;
        # confirm against aiohmi.ipmi.command.
        c = cmd.Command(sys.argv[1], os.environ['BMCUSER'],
                        os.environ['BMCPASS'])
        energy = await EnergyManager.create(c)
        print(await energy.get_dc_energy(c))

    asyncio.run(_main())
|
||||
117
confluent_server/aiohmi/ipmi/oem/lenovo/firmware.py
Normal file
117
confluent_server/aiohmi/ipmi/oem/lenovo/firmware.py
Normal file
@@ -0,0 +1,117 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from aiohmi.ipmi.oem.lenovo import inventory
|
||||
|
||||
|
||||
# Field layout of the Lenovo firmware-version inventory entry; each field
# names a firmware component whose version string is reported.
firmware_fields = (
    inventory.EntryField("Revision", "B"),
    inventory.EntryField("Bios", "16s"),
    inventory.EntryField("Operational ME", "10s"),
    inventory.EntryField("Recovery ME", "10s"),
    inventory.EntryField("RAID 1", "16s"),
    inventory.EntryField("RAID 2", "16s"),
    inventory.EntryField("Mezz 1", "16s"),
    inventory.EntryField("Mezz 2", "16s"),
    inventory.EntryField("BMC", "16s"),
    inventory.EntryField("LEPT", "16s"),
    inventory.EntryField("PSU 1", "16s"),
    inventory.EntryField("PSU 2", "16s"),
    inventory.EntryField("CPLD", "16s"),
    inventory.EntryField("LIND", "16s"),
    inventory.EntryField("WIND", "16s"),
    inventory.EntryField("DIAG", "16s"))

# Field layout of the AsrockRack (RS160) BMC firmware revision entry.
asrock_firmware_fields = (
    inventory.EntryField("Major Firmware Revision", "B"),
    inventory.EntryField("Minor Firmware Revision", "B"),
    inventory.EntryField("Auxiliary Firmware Revision", "4s"))

# Per-vendor raw IPMI request used to fetch the firmware versions.
firmware_cmd = {
    "lenovo": {
        "netfn": 0x06,
        "command": 0x59,
        "data": (0x00, 0xc7, 0x00, 0x00)},
    "asrock": {
        "netfn": 0x3a,
        "command": 0x50,
        "data": (0x02, 0x00, 0x01)},
}

# Per-vendor raw IPMI request used to fetch the BIOS version numbers.
bios_cmd = {
    "lenovo": {
        "netfn": 0x32,
        "command": 0xE8,
        "data": (0x01, 0x01, 0x02)},
    "asrock": {
        "netfn": 0x3a,
        "command": 0x50,
        "data": (0x02, 0x01, 0x01)},
}
|
||||
|
||||
|
||||
def parse_firmware_info(raw, bios_versions=None, asrock=False):
    """Yield (component, {'version': ...}) pairs parsed from *raw*.

    :param raw: raw firmware inventory payload from the BMC
    :param bios_versions: optional BIOS version data to report alongside
        the firmware entries (subscriptable on asrock, dict otherwise)
    :param asrock: parse using the AsrockRack RS160 entry layout
    """
    fields = asrock_firmware_fields if asrock else firmware_fields
    _, data = inventory.parse_inventory_category_entry(raw, fields)
    if asrock:
        major_version = data['Major Firmware Revision']
        minor_version = data['Minor Firmware Revision']
        # Asrock RS160 the minor version is Binary Coded Decimal,
        # convert it to Decimal
        minor_version = ((0xff & (minor_version >> 4)) * 10
                         + (0xf & minor_version))
        aux_revision = 0
        if str(data['Auxiliary Firmware Revision']) != '':
            aux_revision = ord(data['Auxiliary Firmware Revision'])

        bmc_version = "%s.%s.%s" % (
            str(major_version),
            str(minor_version),
            str(aux_revision))

        yield ("BMC", {'version': bmc_version})
        if bios_versions is not None:
            yield ("Bios", {'version': bios_versions[0:]})
    else:
        # The leading Revision byte is metadata, not a component version.
        del data["Revision"]
        for component in data:
            yield (component, {'version': data[component]})
        if bios_versions is not None:
            yield ("Bios_bundle_ver",
                   {'version': bios_versions['new_img_version']})
            yield ("Bios_current_ver",
                   {'version': bios_versions['cur_img_version']})
|
||||
|
||||
|
||||
def parse_bios_number(raw):
    """Decode current/pending BIOS version numbers from *raw*."""
    return inventory.parse_bios_number_entry(raw)


def get_categories():
    """Describe the firmware-related inventory categories."""
    return {
        "firmware": {
            "idstr": "FW Version",
            "parser": parse_firmware_info,
            "command": firmware_cmd,
        },
        "bios_version": {
            "idstr": "Bios Version",
            "parser": parse_bios_number,
            "command": bios_cmd,
        },
    }
|
||||
1440
confluent_server/aiohmi/ipmi/oem/lenovo/handler.py
Executable file
1440
confluent_server/aiohmi/ipmi/oem/lenovo/handler.py
Executable file
File diff suppressed because it is too large
Load Diff
2648
confluent_server/aiohmi/ipmi/oem/lenovo/imm.py
Normal file
2648
confluent_server/aiohmi/ipmi/oem/lenovo/imm.py
Normal file
File diff suppressed because it is too large
Load Diff
181
confluent_server/aiohmi/ipmi/oem/lenovo/inventory.py
Executable file
181
confluent_server/aiohmi/ipmi/oem/lenovo/inventory.py
Executable file
@@ -0,0 +1,181 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import struct
|
||||
|
||||
# Registry of inventory categories keyed by category id; populated by
# register_inventory_category() when category plugins are loaded.
categories = {}


def register_inventory_category(module):
    """Merge *module*'s categories into the global registry.

    :param module: an object exposing ``get_categories()`` that returns a
        dict mapping category id to its category description.
    """
    for cat_id, cat in module.get_categories().items():
        categories[cat_id] = cat
|
||||
|
||||
|
||||
class EntryField(object):
    """Describe how one field of a ThinkServer inventory entry is parsed.

    :param name: the name of the field
    :param fmt: struct-module format of the field
    :param include: whether the field appears in the parse output
    :param mapper: optional dict translating raw values for the output
    :param valuefunc: optional callable applied to the value as the last
        step of the build process
    :param multivaluefunc: when True, ``valuefunc`` returns a dict whose
        items are merged into the entry rather than a single value
    :param presence: the field indicates presence; it is never included
        in the output, and a false value discards the whole entry
    """

    def __init__(self, name, fmt, include=True, mapper=None, valuefunc=None,
                 multivaluefunc=False, presence=False):
        self.name = name
        self.fmt = fmt
        self.include = include
        self.mapper = mapper
        self.valuefunc = valuefunc
        self.multivaluefunc = multivaluefunc
        self.presence = presence
|
||||
|
||||
|
||||
# General parameter parsing functions
def parse_inventory_category(name, info, asrock=False, countable=True):
    """Parses every entry in an inventory category

    For example: CPU, memory, PCI, drives
    Expects the first byte to be a count of the number of entries, followed
    by a list of elements to be parsed by a dedicated parser (below).

    :param name: the name of the parameter (e.g.: "cpu")
    :param info: a list of integers with raw data read from an IPMI requests
    :param asrock: a boolean represents if RS160 with asrockrack or not
    :param countable: whether the data have an entries count field
    :returns: dict -- a list of entries in the category.
    """
    # Drop the first response byte -- presumably status/overhead, since the
    # asrock CPU payload below keeps it; TODO confirm against firmware docs.
    raw = info["data"][1:]

    if name == "cpu" and asrock:
        raw = info["data"]

    cur = 0
    if countable:
        # Leading byte is the number of entries that follow.
        count = bytearray(raw)[cur]
        cur += 1
    else:
        count = 0
    # Entries whose presence flag is false are parsed but not reported.
    discarded = 0

    entries = []
    while cur < len(raw):
        # Each category registers its own per-entry parser; it returns
        # (bytes consumed, entry dict or None when not present).
        read, parser = categories[name]["parser"](raw[cur:])
        cur = cur + read
        # Account for discarded entries (because they are not present)
        if parser is None:
            discarded += 1
            continue
        if not countable:
            # count by myself
            count += 1
            parser["index"] = count
        entries.append(parser)

    # TODO(avidal): raise specific exception to point that there's data left in
    # the buffer
    if cur != len(raw):
        raise Exception
    # TODO(avidal): raise specific exception to point that the number of
    # entries is different than the expected
    if count - discarded != len(entries) and not asrock:
        raise Exception
    return entries
|
||||
|
||||
|
||||
def parse_inventory_category_entry(raw, fields):
    """Parse one inventory entry from *raw* according to *fields*.

    :param raw: raw bytes; may hold several entries, but only the first
        is read.
    :param fields: iterable of EntryField objects describing the layout.
    :returns: tuple of (bytes consumed, entry dict) -- the dict is None
        when a presence field marks the entry as absent.
    """
    remaining = raw
    entry = {}
    consumed = 0
    absent = False
    for field in fields:
        value = struct.unpack_from(field.fmt, remaining)[0]
        width = struct.calcsize(field.fmt)
        consumed += width
        if consumed > len(raw):
            break

        remaining = remaining[width:]
        # A false presence flag means the entry must be discarded, but the
        # rest is still parsed so the caller learns how many bytes it spans.
        if field.presence and not bool(value):
            absent = True
        if not field.include:
            continue

        if field.fmt[-1] == "s":
            # Trim NUL / 0xff padding off fixed-width strings.
            value = value.rstrip(b'\x00\xff')
        if field.mapper and value in field.mapper:
            value = field.mapper[value]
        if field.valuefunc:
            value = field.valuefunc(value)

        if field.multivaluefunc:
            # valuefunc produced a dict; merge its items into the entry.
            for key in value:
                entry[key] = value[key]
        else:
            entry[field.name] = value

    if absent:
        entry = None
    return consumed, entry
|
||||
|
||||
|
||||
def parse_bios_number_entry(raw):
    """Parse the pending and current BIOS versions out of *raw*.

    :param raw: the raw payload of the BIOS number inventory response
    :returns: dict with 'new_img_version' and 'cur_img_version' strings
    """
    def version_at(offset):
        # Each version is laid out as <major:u8><minor:u8><aux:u32>.
        major = struct.unpack_from("1B", raw, offset)[0]
        minor = struct.unpack_from("1B", raw, offset + 1)[0]
        aux = struct.unpack_from("I", raw, offset + 2)[0]
        return "%s.%s.%s" % (str(major), str(minor), str(aux))

    # Pending image at offset 25, currently-running image at offset 31.
    return {
        'new_img_version': version_at(25),
        'cur_img_version': version_at(31)
    }
|
||||
1127
confluent_server/aiohmi/ipmi/oem/lenovo/nextscale.py
Normal file
1127
confluent_server/aiohmi/ipmi/oem/lenovo/nextscale.py
Normal file
File diff suppressed because it is too large
Load Diff
62
confluent_server/aiohmi/ipmi/oem/lenovo/pci.py
Executable file
62
confluent_server/aiohmi/ipmi/oem/lenovo/pci.py
Executable file
@@ -0,0 +1,62 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from aiohmi.ipmi.oem.lenovo import inventory
|
||||
|
||||
# Field layout of one PCI adapter inventory entry as returned by the BMC.
pci_fields = (
    inventory.EntryField("index", "B"),
    inventory.EntryField("PCIType", "B", mapper={
        0x0: "On board slot",
        0x1: "Riser Type 1",
        0x2: "Riser Type 2",
        0x3: "Riser Type 3",
        0x4: "Riser Type 4",
        0x5: "Riser Type 5",
        0x6: "Riser Type 6a",
        0x7: "Riser Type 6b",
        0x8: "ROC",
        0x9: "Mezz"
    }),
    inventory.EntryField("BusNumber", "B"),
    inventory.EntryField("DeviceFunction", "B"),
    # A falsy VendorID flags a non-present device; the entry is discarded.
    inventory.EntryField("VendorID", "<H", presence=True),
    inventory.EntryField("DeviceID", "<H"),
    inventory.EntryField("SubSystemVendorID", "<H"),
    inventory.EntryField("SubSystemID", "<H"),
    inventory.EntryField("InterfaceType", "B"),
    inventory.EntryField("SubClassCode", "B"),
    inventory.EntryField("BaseClassCode", "B"),
    inventory.EntryField("LinkSpeed", "B"),
    inventory.EntryField("LinkWidth", "B"),
    inventory.EntryField("Reserved", "h")
)
|
||||
|
||||
|
||||
def parse_pci_info(raw):
    """Decode one PCI adapter entry from *raw* using ``pci_fields``."""
    return inventory.parse_inventory_category_entry(raw, pci_fields)


def get_categories():
    """Describe the "pci" inventory category for the category registry."""
    pci_category = {
        "idstr": "PCI {0}",
        "parser": parse_pci_info,
        "command": {
            "netfn": 0x06,
            "command": 0x59,
            "data": (0x00, 0xc1, 0x03, 0x00),
        },
        # AMI-based BMCs require the known firmware workaround here.
        "workaround_bmc_bug": lambda t: t == "ami",
    }
    return {"pci": pci_category}
|
||||
111
confluent_server/aiohmi/ipmi/oem/lenovo/psu.py
Executable file
111
confluent_server/aiohmi/ipmi/oem/lenovo/psu.py
Executable file
@@ -0,0 +1,111 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from aiohmi.ipmi.oem.lenovo import inventory
|
||||
|
||||
|
||||
# DMTF power supply type codes (decoded from status word bits 10-13).
psu_type = {
    0b0001: "Other",
    0b0010: "Unknown",
    0b0011: "Linear",
    0b0100: "Switching",
    0b0101: "Battery",
    0b0110: "UPS",
    0b0111: "Converter",
    0b1000: "Regulator",
}
# DMTF power supply operational status codes.
psu_status = {
    0b001: "Other",
    0b010: "Unknown",
    0b011: "OK",
    0b100: "Non-critical",
    0b101: "Critical; power supply has failed and has been taken off-line"
}
# DMTF input voltage range switching codes (status word bits 3-6).
psu_voltage_range_switch = {
    0b0001: "Other",
    0b0010: "Unknown",
    0b0011: "Manual",
    0b0100: "Auto-switch",
    0b0101: "Wide range",
    0b0110: "Not applicable"
}
|
||||
|
||||
|
||||
def psu_status_word_slice(w, s, e):
    """Return bits s..e (inclusive) of binary-string *w* as an int."""
    return int(w[-e - 1:-s], 2)


def psu_status_word_bit(w, b):
    """Return bit *b* of binary-string *w* as 0 or 1."""
    return int(w[-b - 1])


def psu_status_word_parser(word):
    """Decode the 16-bit PSU status word into a dict of named fields."""
    word = "{0:016b}".format(word)
    decoded = {}

    # Bits 10-13: DMTF power supply type.
    decoded["DMTF Power Supply Type"] = psu_type.get(
        psu_status_word_slice(word, 10, 13), "Invalid")

    # Bits 3-6: DMTF input voltage range switching capability.
    decoded["DMTF Input Voltage Range"] = psu_voltage_range_switch.get(
        psu_status_word_slice(word, 3, 6), "Invalid")

    # Power supply is unplugged from the wall
    decoded["Unplugged"] = bool(psu_status_word_bit(word, 2))

    # Power supply is hot-replaceable
    decoded["Hot Replaceable"] = bool(psu_status_word_bit(word, 0))

    return decoded
|
||||
|
||||
|
||||
# Field layout of one power supply inventory entry as returned by the BMC.
psu_fields = (
    inventory.EntryField("index", "B"),
    # A false presence byte causes the whole entry to be discarded.
    inventory.EntryField("Presence State", "B", presence=True),
    inventory.EntryField("Capacity W", "<H"),
    inventory.EntryField("Board manufacturer", "18s"),
    inventory.EntryField("Board model", "18s"),
    inventory.EntryField("Board manufacture date", "10s"),
    inventory.EntryField("Board serial number", "34s"),
    inventory.EntryField("Board manufacturer revision", "5s"),
    inventory.EntryField("Board product name", "10s"),
    inventory.EntryField("PSU Asset Tag", "10s"),
    inventory.EntryField(
        "PSU Redundancy Status",
        "B",
        valuefunc=lambda v: "Not redundant" if v == 0x00 else "Redundant"
    ),
    # The status word decodes into several named fields that are merged
    # into the entry (see psu_status_word_parser).
    inventory.EntryField(
        "PSU Status Word",
        "<H",
        valuefunc=psu_status_word_parser, multivaluefunc=True
    )
)
|
||||
|
||||
|
||||
def parse_psu_info(raw):
    """Decode one power supply entry from *raw* using ``psu_fields``."""
    return inventory.parse_inventory_category_entry(raw, psu_fields)


def get_categories():
    """Describe the "psu" inventory category for the category registry."""
    psu_category = {
        "idstr": "Power Supply {0}",
        "parser": parse_psu_info,
        "command": {
            "netfn": 0x06,
            "command": 0x59,
            "data": (0x00, 0xc6, 0x00, 0x00),
        },
    }
    return {"psu": psu_category}
|
||||
64
confluent_server/aiohmi/ipmi/oem/lenovo/raid_controller.py
Normal file
64
confluent_server/aiohmi/ipmi/oem/lenovo/raid_controller.py
Normal file
@@ -0,0 +1,64 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from aiohmi.ipmi.oem.lenovo import inventory
|
||||
|
||||
|
||||
# Field layout of one RAID controller inventory entry; up to eight flash
# component name/version pairs are reported per controller.
raid_controller_fields = (
    inventory.EntryField("ControllerID", "I"),
    inventory.EntryField("AdapterType", "B", mapper={
        0x00: "Unknown",
        0x01: "RAIDController"
    }),
    inventory.EntryField("SupercapPresence", "B", mapper={
        0x00: "Absent",
        0x01: "Present"
    }),
    inventory.EntryField("FlashComponent1Name", "16s"),
    inventory.EntryField("FlashComponent1Version", "64s"),
    inventory.EntryField("FlashComponent2Name", "16s"),
    inventory.EntryField("FlashComponent2Version", "64s"),
    inventory.EntryField("FlashComponent3Name", "16s"),
    inventory.EntryField("FlashComponent3Version", "64s"),
    inventory.EntryField("FlashComponent4Name", "16s"),
    inventory.EntryField("FlashComponent4Version", "64s"),
    inventory.EntryField("FlashComponent5Name", "16s"),
    inventory.EntryField("FlashComponent5Version", "64s"),
    inventory.EntryField("FlashComponent6Name", "16s"),
    inventory.EntryField("FlashComponent6Version", "64s"),
    inventory.EntryField("FlashComponent7Name", "16s"),
    inventory.EntryField("FlashComponent7Version", "64s"),
    inventory.EntryField("FlashComponent8Name", "16s"),
    inventory.EntryField("FlashComponent8Version", "64s")
)
|
||||
|
||||
|
||||
def parse_raid_controller_info(raw):
    """Decode one RAID controller entry using ``raid_controller_fields``."""
    return inventory.parse_inventory_category_entry(
        raw, raid_controller_fields)


def get_categories():
    """Describe the "raid_controller" inventory category."""
    controller_category = {
        "idstr": "RAID Controller {0}",
        "parser": parse_raid_controller_info,
        # Payload carries no leading entry-count byte; entries are counted
        # by the generic category parser instead.
        "countable": False,
        "command": {
            "netfn": 0x06,
            "command": 0x59,
            "data": (0x00, 0xc4, 0x00, 0x00),
        },
    }
    return {"raid_controller": controller_category}
|
||||
71
confluent_server/aiohmi/ipmi/oem/lenovo/raid_drive.py
Normal file
71
confluent_server/aiohmi/ipmi/oem/lenovo/raid_drive.py
Normal file
@@ -0,0 +1,71 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from aiohmi.ipmi.oem.lenovo import inventory
|
||||
|
||||
|
||||
# Field layout of one RAID-attached drive inventory entry; same shape as
# drive_fields but with the extra SSM_FLASH media type.
raid_drive_fields = (
    inventory.EntryField("index", "B"),
    inventory.EntryField("VendorID", "64s"),
    inventory.EntryField("Size", "I",
                         valuefunc=lambda v: str(v) + " MB"),
    inventory.EntryField("MediaType", "B", mapper={
        0x00: "HDD",
        0x01: "SSD",
        0x02: "SSM_FLASH"
    }),
    inventory.EntryField("InterfaceType", "B", mapper={
        0x00: "Unknown",
        0x01: "ParallelSCSI",
        0x02: "SAS",
        0x03: "SATA",
        0x04: "FC"
    }),
    inventory.EntryField("FormFactor", "B", mapper={
        0x00: "Unknown",
        0x01: "2.5in",
        0x02: "3.5in"
    }),
    inventory.EntryField("LinkSpeed", "B", mapper={
        0x00: "Unknown",
        0x01: "1.5 Gb/s",
        0x02: "3.0 Gb/s",
        0x03: "6.0 Gb/s",
        0x04: "12.0 Gb/s"
    }),
    inventory.EntryField("SlotNumber", "B"),
    inventory.EntryField("ControllerIndex", "B"),
    inventory.EntryField("DeviceState", "B", mapper={
        0x00: "active",
        0x01: "stopped",
        0xff: "transitioning"
    }))
|
||||
|
||||
|
||||
def parse_raid_drive_info(raw):
    """Decode one RAID drive entry using ``raid_drive_fields``."""
    return inventory.parse_inventory_category_entry(raw, raid_drive_fields)


def get_categories():
    """Describe the RAID drive inventory category."""
    # NOTE(review): the "raid_raid_drive" key looks like a typo for
    # "raid_drive", but it may be referenced by id elsewhere -- confirm
    # before renaming.
    drive_category = {
        "idstr": "RAID Drive {0}",
        "parser": parse_raid_drive_info,
        "command": {
            "netfn": 0x06,
            "command": 0x59,
            "data": (0x00, 0xc5, 0x00, 0x00),
        },
    }
    return {"raid_raid_drive": drive_category}
|
||||
112
confluent_server/aiohmi/ipmi/oem/lookup.py
Executable file
112
confluent_server/aiohmi/ipmi/oem/lookup.py
Executable file
@@ -0,0 +1,112 @@
|
||||
# Copyright 2015 Lenovo Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
import aiohmi.ipmi.oem.generic as generic
|
||||
import aiohmi.ipmi.oem.lenovo.handler as lenovo
|
||||
|
||||
|
||||
# Module-level logger for plugin discovery/registration diagnostics.
logger = logging.getLogger(__name__)

# The mapping comes from
# http://www.iana.org/assignments/enterprise-numbers/enterprise-numbers
# Only mapping the ones with known backends
oemmap = {
    20301: lenovo,  # IBM x86 (and System X at Lenovo)
    19046: lenovo,  # Lenovo x86 (e.g. Thinkserver)
    7154: lenovo,
}
|
||||
|
||||
|
||||
async def get_oem_handler(oemid, ipmicmd, *args):
    """Select and construct an OEM handler for the given identity.

    Returns a tuple ``(handler, oem_specific)`` where *oem_specific* is
    True when an entry in ``oemmap`` matched, and False when the generic
    fallback handler was used.
    """
    # first try to find with composite key manufacturer_id.product_id,
    # if found return directly
    # then try to find with manufacturer_id
    for item in (
        '{}.{}'.format(oemid['manufacturer_id'], oemid['product_id']),
        oemid['manufacturer_id'],
    ):
        if item in oemmap:
            return (await oemmap[item].OEMHandler.create(oemid, ipmicmd, *args), True)
    else:
        # for-else: no key matched, fall back to the generic handler
        return await generic.OEMHandler.create(oemid, ipmicmd, *args), False
|
||||
|
||||
|
||||
def load_plugins():
    """Discover plugin directories beside this module and register them.

    Each immediate subdirectory is temporarily pushed onto ``sys.path``
    while it is scanned for ``handler.py`` modules to register.
    """
    # load plugins and register oemmap
    path = os.path.dirname(os.path.realpath(__file__))

    for plugindir in os.listdir(path):
        plugindir = os.path.join(path, plugindir)

        if not os.path.isdir(plugindir):
            continue
        sys.path.insert(1, plugindir)
        # two passes, to avoid adding both py and pyc files
        find_plugin(path, plugindir)
        # restore path to not include the plugindir
        sys.path.pop(1)
|
||||
|
||||
|
||||
def find_plugin(base_dir, cur_dir):
    """Recursively scan *cur_dir* for handler.py plugin modules.

    Directories are descended into; a ``handler.py`` file triggers
    registration of its containing directory; everything else is ignored.
    """
    for entry in os.listdir(cur_dir):
        full_path = os.path.join(cur_dir, entry)
        if os.path.isdir(full_path):
            # descend into subdirectories looking for more plugins
            find_plugin(base_dir, full_path)
            continue
        if entry == 'handler.py':
            # found a plugin module; register its containing directory
            load_and_register(base_dir, cur_dir)
|
||||
|
||||
|
||||
def load_and_register(base_dir, cur_dir):
    """Import the handler module under *cur_dir* and register it.

    The module must define ``device_type_supported`` (an iterable of
    oemmap keys) to be registered; otherwise it is skipped with a debug
    message.  Import or registration failures are logged, never raised,
    so one bad plugin cannot break plugin loading as a whole.
    """
    try:
        oem_handler = __import__(make_plugin_name(base_dir, cur_dir),
                                 fromlist=['handler'])
        if 'device_type_supported' in oem_handler.__dict__:
            # 'devtype' rather than 'type' to avoid shadowing the builtin
            for devtype in oem_handler.device_type_supported:
                register_oem_map(devtype, oem_handler)
        else:
            logger.debug(
                'handler in {} does not support plugin.'.format(cur_dir))
    except Exception as ex:
        logger.exception('exception while loading handler in {} : {}'
                         .format(cur_dir, ex))
|
||||
|
||||
|
||||
def register_oem_map(type, handler):
    """Register *handler* in oemmap under key *type*.

    Replacement of an existing registration is intentional and is
    logged at info level.  (Parameter name 'type', which shadows the
    builtin, is kept for backward compatibility with keyword callers.)
    """
    if type in oemmap:
        logger.info('type {} already registered as {}, replaced with {}.'
                    .format(type, oemmap.get(type), handler))
    # Single assignment replaces the original duplicated if/else branches.
    oemmap[type] = handler
|
||||
|
||||
|
||||
def make_plugin_name(base_dir, cur_dir):
    """Build the dotted import name for the handler module in *cur_dir*.

    The path of *cur_dir* relative to *base_dir* becomes the package
    path, rooted at this package, with 'handler' as the module name.
    """
    rel = os.path.relpath(cur_dir, base_dir).replace('/', '.')
    return '{}.{}.{}'.format(__package__, rel, 'handler')
|
||||
|
||||
|
||||
# load_plugins
# Discover and register all bundled OEM plugins at import time.
load_plugins()
|
||||
0
confluent_server/aiohmi/ipmi/private/__init__.py
Normal file
0
confluent_server/aiohmi/ipmi/private/__init__.py
Normal file
1886
confluent_server/aiohmi/ipmi/private/constants.py
Normal file
1886
confluent_server/aiohmi/ipmi/private/constants.py
Normal file
File diff suppressed because it is too large
Load Diff
145
confluent_server/aiohmi/ipmi/private/localsession.py
Normal file
145
confluent_server/aiohmi/ipmi/private/localsession.py
Normal file
@@ -0,0 +1,145 @@
|
||||
# Copyright 2017 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import ctypes
|
||||
import fcntl
|
||||
from select import select
|
||||
|
||||
import aiohmi.ipmi.private.util as iutil
|
||||
|
||||
|
||||
class IpmiMsg(ctypes.Structure):
    # ctypes mirror of struct ipmi_msg from the Linux kernel's ipmi.h
    _fields_ = [('netfn', ctypes.c_ubyte),
                ('cmd', ctypes.c_ubyte),
                ('data_len', ctypes.c_short),
                ('data', ctypes.POINTER(ctypes.c_ubyte))]
|
||||
|
||||
|
||||
class IpmiSystemInterfaceAddr(ctypes.Structure):
    # ctypes mirror of struct ipmi_system_interface_addr from ipmi.h
    _fields_ = [('addr_type', ctypes.c_int),
                ('channel', ctypes.c_short),
                ('lun', ctypes.c_ubyte)]
|
||||
|
||||
|
||||
class IpmiRecv(ctypes.Structure):
    # ctypes mirror of struct ipmi_recv from ipmi.h
    _fields_ = [('recv_type', ctypes.c_int),
                ('addr', ctypes.POINTER(IpmiSystemInterfaceAddr)),
                ('addr_len', ctypes.c_uint),
                ('msgid', ctypes.c_long),
                ('msg', IpmiMsg)]
|
||||
|
||||
|
||||
class IpmiReq(ctypes.Structure):
    # ctypes mirror of struct ipmi_req from ipmi.h
    _fields_ = [('addr', ctypes.POINTER(IpmiSystemInterfaceAddr)),
                ('addr_len', ctypes.c_uint),
                ('msgid', ctypes.c_long),
                ('msg', IpmiMsg)]
|
||||
|
||||
|
||||
# ioctl direction bits, matching the kernel's _IOC_NONE/_IOC_WRITE/_IOC_READ.
_IONONE = 0
_IOWRITE = 1
_IOREAD = 2
# ioctl request numbers are assembled by hand here exactly as the
# kernel's _IOR/_IOWR macros would; magic number is 'i', per ipmi.h.
IPMICTL_SET_MY_ADDRESS_CMD = (
    _IOREAD << 30 | ctypes.sizeof(ctypes.c_uint) << 16
    | ord('i') << 8 | 17)  # from ipmi.h
IPMICTL_SEND_COMMAND = (
    _IOREAD << 30 | ctypes.sizeof(IpmiReq) << 16
    | ord('i') << 8 | 13)  # from ipmi.h
# next is really IPMICTL_RECEIVE_MSG_TRUNC, but will only use that
IPMICTL_RECV = (
    (_IOWRITE | _IOREAD) << 30 | ctypes.sizeof(IpmiRecv) << 16
    | ord('i') << 8 | 11)  # from ipmi.h
BMC_SLAVE_ADDR = ctypes.c_uint(0x20)
CURRCHAN = 0xf  # presumably IPMI_BMC_CHANNEL from ipmi.h -- confirm
ADDRTYPE = 0xc  # presumably IPMI_SYSTEM_INTERFACE_ADDR_TYPE -- confirm
|
||||
|
||||
|
||||
class Session(object):
    """IPMI session over the local in-band /dev/ipmi* character device.

    Offers a ``raw_command`` interface comparable to the network
    session object, but backed by ioctl calls against the OpenIPMI
    driver instead of RMCP+ over the network.
    """

    def __init__(self, devnode='/dev/ipmi0'):
        """Create a local session inband

        :param: devnode: The path to the ipmi device
        """
        self.ipmidev = open(devnode, 'r+')
        fcntl.ioctl(self.ipmidev, IPMICTL_SET_MY_ADDRESS_CMD, BMC_SLAVE_ADDR)
        # the interface is initted, create some reusable memory for our
        # session: one 4KB buffer shared by request and response messages
        self.databuffer = ctypes.create_string_buffer(4096)
        self.req = IpmiReq()
        self.rsp = IpmiRecv()
        self.addr = IpmiSystemInterfaceAddr()
        self.req.msg.data = ctypes.cast(
            ctypes.addressof(self.databuffer),
            ctypes.POINTER(ctypes.c_ubyte))
        self.rsp.msg.data = self.req.msg.data
        self.userid = None
        self.password = None

    def await_reply(self):
        """Block until the ipmi device signals a response is readable."""
        rd, _, _ = select((self.ipmidev,), (), (), 1)
        while not rd:
            rd, _, _ = select((self.ipmidev,), (), (), 1)

    async def pause(self, seconds):
        """Asynchronously sleep for *seconds*."""
        # Fix: 'asyncio' was referenced here but never imported anywhere
        # in this module (imports are ctypes/fcntl/select/iutil), which
        # made this method raise NameError at runtime. Import locally.
        import asyncio
        await asyncio.sleep(seconds)

    @property
    def parsed_rsp(self):
        """Decode the last received message into a response dict.

        The dict carries 'netfn', 'command', completion 'code', 'data',
        and an 'error' string when the completion code maps to one.
        """
        response = {'netfn': self.rsp.msg.netfn, 'command': self.rsp.msg.cmd,
                    'code': bytearray(self.databuffer.raw)[0],
                    'data': bytearray(
                        self.databuffer.raw[1:self.rsp.msg.data_len])}
        errorstr = iutil.get_ipmi_error(response)
        if errorstr:
            response['error'] = errorstr
        return response

    def raw_command(self,
                    netfn,
                    command,
                    data=(),
                    bridge_request=None,
                    retry=True,
                    delay_xmit=None,
                    timeout=None,
                    waitall=False, rslun=0):
        """Send a raw IPMI command and return the parsed response.

        Only netfn, command, data and rslun are honored in-band; the
        other parameters exist for signature compatibility with the
        network session and are ignored here.
        """
        self.addr.channel = CURRCHAN
        self.addr.addr_type = ADDRTYPE
        self.addr.lun = rslun
        self.req.addr_len = ctypes.sizeof(IpmiSystemInterfaceAddr)
        self.req.addr = ctypes.pointer(self.addr)
        self.req.msg.netfn = netfn
        self.req.msg.cmd = command
        if data:
            data = memoryview(bytearray(data))
            try:
                self.databuffer[:len(data)] = data[:len(data)]
            except ValueError:
                # some Python versions require bytes, not memoryview here
                self.databuffer[:len(data)] = data[:len(data)].tobytes()
        self.req.msg.data_len = len(data)
        fcntl.ioctl(self.ipmidev, IPMICTL_SEND_COMMAND, self.req)
        self.await_reply()
        # reset the receive length to full buffer before the recv ioctl
        self.rsp.msg.data_len = 4096
        self.rsp.addr = ctypes.pointer(self.addr)
        self.rsp.addr_len = ctypes.sizeof(IpmiSystemInterfaceAddr)
        fcntl.ioctl(self.ipmidev, IPMICTL_RECV, self.rsp)
        return self.parsed_rsp
|
||||
|
||||
|
||||
def main():
    """Manual smoke test: open the local device and issue one command."""
    a = Session('/dev/ipmi0')
    print(repr(a.raw_command(0, 1)))
|
||||
|
||||
|
||||
# Allow running this module directly as a quick manual test.
if __name__ == '__main__':
    main()
|
||||
395
confluent_server/aiohmi/ipmi/private/serversession.py
Normal file
395
confluent_server/aiohmi/ipmi/private/serversession.py
Normal file
@@ -0,0 +1,395 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""This represents the server side of a session object
|
||||
Split into a separate file to avoid overly manipulating the as-yet
|
||||
client-centered session object
|
||||
"""
|
||||
|
||||
import collections
|
||||
import hashlib
|
||||
import hmac
|
||||
import os
|
||||
import socket
|
||||
import struct
|
||||
import uuid
|
||||
|
||||
import aiohmi.ipmi.private.constants as constants
|
||||
import aiohmi.ipmi.private.session as ipmisession
|
||||
|
||||
|
||||
class ServerSession(ipmisession.Session):
    """BMC-side state machine for one RMCP+ session.

    Instantiated when an RMCP+ open session request arrives; walks the
    client through open-session response, RAKP2 and RAKP4 (cipher
    suite 3 only), then dispatches in-session requests to the owning
    bmc object.
    """

    def __new__(cls, authdata, kg, clientaddr, netsocket, request, uuid,
                bmc):
        # Need to do default new type behavior. The normal session
        # takes measures to assure the caller shares even when they
        # didn't try. We don't have that operational mode to contend
        # with in the server case (one file descriptor per bmc)
        return object.__new__(cls)

    def create_open_session_response(self, request):
        # Build the RMCP+ open session response payload from the
        # client's open session request bytes.
        clienttag = request[0]
        # role = request[1]
        self.clientsessionid = request[4:8]
        # TODO(jbjohnso): intelligently handle integrity/auth/conf
        # for now, forcibly do cipher suite 3
        self.managedsessionid = os.urandom(4)
        # table 13-17, 1 for now (hmac-sha1), 3 should also be supported
        # table 13-18, integrity, 1 for now is hmac-sha1-96, 4 is sha256
        # confidentiality: 1 is aes-cbc-128, the only one
        self.privlevel = 4
        response = (bytearray([clienttag, 0, self.privlevel, 0])
                    + self.clientsessionid + self.managedsessionid
                    + bytearray([
                        0, 0, 0, 8, 1, 0, 0, 0,  # auth
                        1, 0, 0, 8, 1, 0, 0, 0,  # integrity
                        2, 0, 0, 8, 1, 0, 0, 0,  # privacy
                    ]))
        return response

    def __init__(self, authdata, kg, clientaddr, netsocket, request, uuid,
                 bmc):
        # begin conversation per RMCP+ open session request
        self.uuid = uuid
        self.currhashlib = hashlib.sha1
        self.currhashlen = 12
        self.rqaddr = constants.IPMI_BMC_ADDRESS
        self.authdata = authdata
        self.servermode = True
        self.ipmiversion = 2.0
        self.sequencenumber = 0
        self.sessionid = 0
        self.bmc = bmc
        self.lastpayload = None
        self.rqlun = None  # This will be provided by the client
        self.broken = False
        self.authtype = 6
        self.integrityalgo = 0
        self.confalgo = 0
        self.kg = kg
        self.socket = netsocket
        self.sockaddr = clientaddr
        self.pendingpayloads = collections.deque([])
        self.pktqueue = collections.deque([])
        # Register this session so further packets from clientaddr on
        # this bmc's port are routed here.
        if clientaddr not in ipmisession.Session.bmc_handlers:
            ipmisession.Session.bmc_handlers[clientaddr] = {bmc.port: self}
        else:
            ipmisession.Session.bmc_handlers[clientaddr][bmc.port] = self
        response = self.create_open_session_response(bytearray(request))
        self.send_payload(response,
                          constants.payload_types['rmcpplusopenresponse'],
                          retry=False)

    def _got_rmcp_openrequest(self, data):
        # A (possibly retransmitted) open session request mid-session;
        # answer with a fresh open session response.
        response = self.create_open_session_response(
            struct.pack('B' * len(data), *data))
        self.send_payload(response,
                          constants.payload_types['rmcpplusopenresponse'],
                          retry=False)

    def _got_rakp1(self, data):
        # RAKP1: record client random number, role and username, then
        # answer with RAKP2 carrying our random number and HMAC.
        clienttag = data[0]
        self.Rm = data[8:24]
        self.rolem = data[24]
        self.maxpriv = self.rolem & 0b111
        namepresent = data[27]
        if namepresent == 0:
            # ignore null username for now
            return
        self.username = bytes(data[28:])
        password = self.authdata.get(self.username.decode('utf-8'))
        if password is None:
            # don't think about invalid usernames for now
            return
        uuidbytes = self.uuid.bytes
        self.uuiddata = uuidbytes
        self.Rc = os.urandom(16)
        hmacdata = (self.clientsessionid + self.managedsessionid
                    + self.Rm + self.Rc + uuidbytes
                    + bytearray([self.rolem, len(self.username)]))
        hmacdata += self.username
        self.kuid = password.encode('utf-8')
        if self.kg is None:
            self.kg = self.kuid
        authcode = hmac.new(
            self.kuid, bytes(hmacdata), hashlib.sha1).digest()
        # regrettably, ipmi mandates the server send out an hmac first
        # akin to a leak of /etc/shadow, not too worrisome if the secret
        # is complex, but terrible for most likely passwords selected by
        # a human
        newmessage = (bytearray([clienttag, 0, 0, 0]) + self.clientsessionid
                      + self.Rc + uuidbytes + authcode)
        self.send_payload(newmessage, constants.payload_types['rakp2'],
                          retry=False)

    def _got_rakp2(self, data):
        # stub, server should not think about rakp2
        pass

    def _got_rakp3(self, data):
        # for now drop rakp3 with bad authcode
        # respond correctly a TODO(jjohnson2), since Kg being used
        # yet incorrect is a scenario why rakp3 could be bad
        # even if rakp2 was good
        RmRc = self.Rm + self.Rc
        self.sik = hmac.new(self.kg,
                            bytes(RmRc)
                            + struct.pack("2B", self.rolem, len(self.username))
                            + self.username, hashlib.sha1).digest()
        # derive the integrity (k1) and confidentiality (k2/aeskey) keys
        self.k1 = hmac.new(self.sik, b'\x01' * 20, hashlib.sha1).digest()
        self.k2 = hmac.new(self.sik, b'\x02' * 20, hashlib.sha1).digest()
        self.aeskey = self.k2[0:16]
        hmacdata = (self.Rc + self.clientsessionid
                    + struct.pack("2B", self.rolem, len(self.username))
                    + self.username)
        expectedauthcode = hmac.new(self.kuid, bytes(hmacdata), hashlib.sha1
                                    ).digest()
        authcode = struct.pack("%dB" % len(data[8:]), *data[8:])
        if expectedauthcode != authcode:
            # TODO(jjohnson2): RMCP error back at invalid rakp3
            return
        clienttag = data[0]
        if data[1] != 0:
            # client did not like our response, so ignore the rakp3
            return
        self.localsid = struct.unpack('<I', self.managedsessionid)[0]
        self.ipmicallback = self.handle_client_request
        self._send_rakp4(clienttag, 0)

    def handle_client_request(self, request):
        """Dispatch one decoded in-session request from the client."""
        if request['netfn'] == 6 and request['command'] == 0x3b:
            # set session privilege level
            pendingpriv = request['data'][0]
            returncode = 0
            if pendingpriv > 1:
                if pendingpriv > self.maxpriv:
                    returncode = 0x81
                else:
                    self.clientpriv = request['data'][0]
            self._send_ipmi_net_payload(code=returncode,
                                        data=[self.clientpriv])
        elif request['netfn'] == 6 and request['command'] == 0x3c:
            # close session
            self.send_ipmi_response()
            self.close_server_session()
        else:
            # everything else is the bmc implementation's business
            self.bmc.handle_raw_request(request, self)

    def close_server_session(self):
        # hook for subclasses; base implementation keeps no state to free
        pass

    def _send_rakp4(self, tagvalue, statuscode):
        # RAKP4 completes the handshake; afterwards this session is
        # established with aes confidentiality and sha1 integrity.
        payload = bytearray(
            [tagvalue, statuscode, 0, 0]) + self.clientsessionid
        hmacdata = self.Rm + self.managedsessionid + self.uuiddata
        hmacdata = struct.pack('%dB' % len(hmacdata), *hmacdata)
        authdata = hmac.new(self.sik, hmacdata, hashlib.sha1).digest()[:12]
        payload += authdata
        self.send_payload(payload, constants.payload_types['rakp4'],
                          retry=False)
        self.confalgo = 'aes'
        self.integrityalgo = 'sha1'
        self.sequencenumber = 1
        self.sessionid = struct.unpack(
            '<I', struct.pack('4B', *self.clientsessionid))[0]

    def _got_rakp4(self, data):
        # stub, server should not think about rakp4
        pass

    def _timedout(self):
        """Expire a client session after a period of inactivity

        After the session inactivity timeout, this invalidate the client
        session.
        """
        # for now, we will have a non-configurable 60 second timeout
        pass

    def _handle_channel_auth_cap(self, request):
        """Handle incoming channel authentication capabilities request

        This is used when serving as an IPMI target to service client
        requests for client authentication capabilities
        """
        pass

    def send_ipmi_response(self, data=[], code=0):
        # NOTE(review): mutable default argument; appears safe only
        # because the list is not mutated here -- confirm in
        # _send_ipmi_net_payload before relying on it.
        self._send_ipmi_net_payload(data=data, code=code)

    def logout(self):
        # server sessions are torn down by the client or by timeout
        pass
|
||||
|
||||
|
||||
class IpmiServer(object):
    """Listening IPMI (RMCP+) target bound to a UDP socket.

    Sessionless requests (channel auth capabilities, cipher suites)
    are answered directly; an RMCP+ open session request spawns a
    ServerSession to carry out the RAKP exchange.
    """
    # auth capabilities for now is a static payload
    # for now always completion code 0, otherwise ignore
    # authentication type fixed to ipmi2, ipmi1 forbidden
    # 0b10000000

    def __init__(self, authdata, port=623, bmcuuid=None, address='::'):
        """Create a new ipmi bmc instance.

        :param authdata: A dict or object with .get() to provide password
                         lookup by username. This does not support the full
                         complexity of what IPMI can support, only a
                         reasonable subset.
        :param port: The default port number to bind to.  Defaults to the
                     standard 623
        :param address: The IP address to bind to. Defaults to '::' (all
                        zeroes)
        """
        self.revision = 0
        self.deviceid = 0
        self.firmwaremajor = 1
        self.firmwareminor = 0
        self.ipmiversion = 2
        self.additionaldevices = 0
        self.mfgid = 0
        self.prodid = 0
        self.pktqueue = collections.deque([])
        if bmcuuid is None:
            self.uuid = uuid.uuid4()
        else:
            self.uuid = bmcuuid
        lanchannel = 1
        authtype = 0b10000000  # ipmi2 only
        authstatus = 0b00000100  # change based on authdata/kg
        chancap = 0b00000010  # ipmi2 only
        oemdata = (0, 0, 0, 0)
        self.authdata = authdata
        # pre-built 'get channel auth capabilities' response body
        self.authcap = struct.pack('BBBBBBBBB', 0, lanchannel, authtype,
                                   authstatus, chancap, *oemdata)
        self.kg = None
        self.timeout = 60
        self.port = port
        addrinfo = socket.getaddrinfo(address, port, 0,
                                      socket.SOCK_DGRAM)[0]
        self.serversocket = ipmisession.Session._assignsocket(addrinfo)
        # register as the sessionless handler for this socket
        ipmisession.Session.bmc_handlers[self.serversocket] = {0: self}

    def send_auth_cap(self, myaddr, mylun, clientaddr, clientlun, clientseq,
                      sockaddr):
        # Reply to 'get channel authentication capabilities' (cmd 0x38)
        # with the canned self.authcap payload in an IPMI 1.5 LAN frame.
        header = bytearray(
            b'\x06\x00\xff\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10')
        headerdata = [clientaddr, clientlun | (7 << 2)]
        headersum = ipmisession._checksum(*headerdata)
        header += bytearray(headerdata + [headersum, myaddr,
                                          mylun | (clientseq << 2), 0x38])
        header += self.authcap
        bodydata = struct.unpack('B' * len(header[17:]), bytes(header[17:]))
        header.append(ipmisession._checksum(*bodydata))
        ipmisession._io_sendto(self.serversocket, header, sockaddr)

    def process_pktqueue(self):
        # Drain all queued (data, sockaddr) datagrams through
        # sessionless handling.
        while self.pktqueue:
            pkt = self.pktqueue.popleft()
            self.sessionless_data(pkt[0], pkt[1])

    def send_cipher_suites(self, myaddr, mylun, clientaddr, clientlun,
                           clientseq, data, sockaddr):
        # the last two bytes is length of message, fixed at 14 for now
        # the rest is boilerplate ipmi, follow along in ipmi spec
        # 'example ipmi over lan' if desired
        header = bytearray(
            b'\x06\x00\xff\x07\x06\x00\x00\x00\x00'
            b'\x00\x00\x00\x00\x00\x0e\x00')
        # now the generic inner ipmi packet, per figure-13-4,
        # ipmi lan message formats
        ipmihdr = bytearray([clientaddr, clientlun | (7 << 2)])
        hdrsum = ipmisession._checksum(*ipmihdr)
        ipmihdr.append(hdrsum)
        rq = bytearray([myaddr, mylun | clientseq << 2, 0x54])
        # for now, hard code a cipher suite 3 only response
        rq.extend(bytearray(b'\x00\x01\xc0\x03\x01\x41\x81'))
        hdrsum = ipmisession._checksum(*rq)
        rq.append(hdrsum)
        pkt = header + ipmihdr + rq
        ipmisession._io_sendto(self.serversocket, pkt, sockaddr)

    def sessionless_data(self, data, sockaddr):
        """Examines unsolicited packet and decides appropriate action.

        For a listening IpmiServer, a packet without an active session
        comes here for examination.  If it is something that is utterly
        sessionless (e.g. get channel authentication), send the appropriate
        response.  If it is a get session challenge or open rmcp+ request,
        spawn a session to handle the context.
        """
        if len(data) < 22:
            return
        data = bytearray(data)
        if not (data[0] == 6 and data[2:4] == b'\xff\x07'):  # not ipmi
            return
        if data[4] == 6:  # ipmi 2 payload...
            payloadtype = data[5]
            if payloadtype not in (0, 16):
                return
            if payloadtype == 16:  # new session to handle conversation
                ServerSession(self.authdata, self.kg, sockaddr,
                              self.serversocket, data[16:], self.uuid,
                              bmc=self)
                return
            # ditch two byte, because ipmi2 header is two
            # bytes longer than ipmi1 (payload type added, payload length 2).
            data = data[2:]
        myaddr, netfnlun = struct.unpack('2B', bytes(data[14:16]))
        netfn = (netfnlun & 0b11111100) >> 2
        mylun = netfnlun & 0b11
        if netfn == 6:  # application request
            if data[19] == 0x38:  # cmd = get channel auth capabilities
                verchannel, level = struct.unpack('2B', bytes(data[20:22]))
                version = verchannel & 0b10000000
                if version != 0b10000000:
                    # only v2-capable requests are served
                    return
                channel = verchannel & 0b1111
                if channel != 0xe:
                    # 0xe means 'this channel'; anything else is ignored
                    return
                (clientaddr, clientlun) = struct.unpack(
                    'BB', bytes(data[17:19]))
                clientseq = clientlun >> 2
                clientlun &= 0b11  # Lun is only the least significant bits
                level &= 0b1111
                self.send_auth_cap(myaddr, mylun, clientaddr, clientlun,
                                   clientseq, sockaddr)
            elif data[19] == 0x54:  # cmd = get channel cipher suites
                clientaddr, clientlun = data[17:19]
                clientseq = clientlun >> 2
                clientlun &= 0b11
                self.send_cipher_suites(myaddr, mylun, clientaddr, clientlun,
                                        clientseq, data, sockaddr)

    def set_kg(self, kg):
        """Sets the Kg for the BMC to use

        In RAKP, Kg is a BMC-specific integrity key that can be set.  If not
        set, Kuid is used for the integrity key
        """
        try:
            self.kg = kg.encode('utf-8')
        except AttributeError:
            # already bytes (or None); store as-is
            self.kg = kg

    def send_device_id(self, session):
        # Respond to 'get device id' with this server's configured identity.
        response = [self.deviceid, self.revision, self.firmwaremajor,
                    self.firmwareminor, self.ipmiversion,
                    self.additionaldevices]
        response += struct.unpack('4B', struct.pack('<I', self.mfgid))
        response += struct.unpack('4B', struct.pack('<I', self.prodid))
        session.send_ipmi_response(data=response)

    def handle_raw_request(self, request, session):
        # per table 5-2, completion code 0xc1 is 'unrecognized'
        session.send_ipmi_response(code=0xc1)

    def logout(self):
        # nothing to release for the listening server itself
        pass
|
||||
1975
confluent_server/aiohmi/ipmi/private/session.py
Normal file
1975
confluent_server/aiohmi/ipmi/private/session.py
Normal file
File diff suppressed because it is too large
Load Diff
1400
confluent_server/aiohmi/ipmi/private/simplesession.py
Normal file
1400
confluent_server/aiohmi/ipmi/private/simplesession.py
Normal file
File diff suppressed because it is too large
Load Diff
839
confluent_server/aiohmi/ipmi/private/spd.py
Normal file
839
confluent_server/aiohmi/ipmi/private/spd.py
Normal file
@@ -0,0 +1,839 @@
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""This implements parsing of DDR SPD data. This is offered up in a pass
|
||||
through fashion by some service processors.
|
||||
|
||||
For now, just doing DDR3 and DDR4
|
||||
|
||||
In many cases, astute readers will note that some of the lookup tables
|
||||
should be a matter of math rather than lookup. However the SPD
|
||||
specification explicitly reserves values not in the lookup tables for
|
||||
future use. It has happened, for example, that a spec was amended
|
||||
with discontinuous values for a field that was until that point
|
||||
possible to derive in a formulaic way
|
||||
"""
|
||||
|
||||
import math
|
||||
import struct
|
||||
|
||||
jedec_ids = [
|
||||
{
|
||||
0x01: "AMD",
|
||||
0x02: "AMI",
|
||||
0x83: "Fairchild",
|
||||
0x04: "Fujitsu",
|
||||
0x85: "GTE",
|
||||
0x86: "Harris",
|
||||
0x07: "Hitachi",
|
||||
0x08: "Inmos",
|
||||
0x89: "Intel",
|
||||
0x8a: "I.T.T.",
|
||||
0x0b: "Intersil",
|
||||
0x8c: "Monolithic Memories",
|
||||
0x0d: "Mostek",
|
||||
0x0e: "Motorola",
|
||||
0x8f: "National",
|
||||
0x10: "NEC",
|
||||
0x91: "RCA",
|
||||
0x92: "Raytheon",
|
||||
0x13: "Conexant (Rockwell)",
|
||||
0x94: "Seeq",
|
||||
0x15: "Philips Semi. (Signetics)",
|
||||
0x16: "Synertek",
|
||||
0x97: "Texas Instruments",
|
||||
0x98: "Toshiba",
|
||||
0x19: "Xicor",
|
||||
0x1a: "Zilog",
|
||||
0x9b: "Eurotechnique",
|
||||
0x1c: "Mitsubishi",
|
||||
0x9d: "Lucent (AT&T)",
|
||||
0x9e: "Exel",
|
||||
0x1f: "Atmel",
|
||||
0x20: "SGS/Thomson",
|
||||
0xa1: "Lattice Semi.",
|
||||
0xa2: "NCR",
|
||||
0x23: "Wafer Scale Integration",
|
||||
0xa4: "IBM",
|
||||
0x25: "Tristar",
|
||||
0x26: "Visic",
|
||||
0xa7: "Intl. CMOS Technology",
|
||||
0xa8: "SSSI",
|
||||
0x29: "Microchip Technology",
|
||||
0x2a: "Ricoh Ltd.",
|
||||
0xab: "VLSI",
|
||||
0x2c: "Micron Technology",
|
||||
0xad: "Hyundai Electronics",
|
||||
0xae: "OKI Semiconductor",
|
||||
0x2f: "ACTEL",
|
||||
0xb0: "Sharp",
|
||||
0x31: "Catalyst",
|
||||
0x32: "Panasonic",
|
||||
0xb3: "IDT",
|
||||
0x34: "Cypress",
|
||||
0xb5: "DEC",
|
||||
0xb6: "LSI Logic",
|
||||
0x37: "Zarlink",
|
||||
0x38: "UTMC",
|
||||
0xb9: "Thinking Machine",
|
||||
0xba: "Thomson CSF",
|
||||
0x3b: "Integrated CMOS(Vertex)",
|
||||
0xbc: "Honeywell",
|
||||
0x3d: "Tektronix",
|
||||
0x3e: "Sun Microsystems",
|
||||
0xbf: "SST",
|
||||
0x40: "MOSEL",
|
||||
0xc1: "Infineon",
|
||||
0xc2: "Macronix",
|
||||
0x43: "Xerox",
|
||||
0xc4: "Plus Logic",
|
||||
0x45: "SunDisk",
|
||||
0x46: "Elan Circuit Tech.",
|
||||
0xc7: "European Silicon Str.",
|
||||
0xc8: "Apple Computer",
|
||||
0xc9: "Xilinx",
|
||||
0x4a: "Compaq",
|
||||
0xcb: "Protocol Engines",
|
||||
0x4c: "SCI",
|
||||
0xcd: "Seiko Instruments",
|
||||
0xce: "Samsung",
|
||||
0x4f: "I3 Design System",
|
||||
0xd0: "Klic",
|
||||
0x51: "Crosspoint Solutions",
|
||||
0x52: "Alliance Semiconductor",
|
||||
0xd3: "Tandem",
|
||||
0x54: "Hewlett-Packard",
|
||||
0xd5: "Intg. Silicon Solutions",
|
||||
0xd6: "Brooktree",
|
||||
0x57: "New Media",
|
||||
0x58: "MHS Electronic",
|
||||
0xd9: "Performance Semi.",
|
||||
0xda: "Winbond Electronic",
|
||||
0x5b: "Kawasaki Steel",
|
||||
0xdc: "Bright Micro",
|
||||
0x5d: "TECMAR",
|
||||
0x5e: "Exar",
|
||||
0xdf: "PCMCIA",
|
||||
0xe0: "LG Semiconductor",
|
||||
0x61: "Northern Telecom",
|
||||
0x62: "Sanyo",
|
||||
0xe3: "Array Microsystems",
|
||||
0x64: "Crystal Semiconductor",
|
||||
0xe5: "Analog Devices",
|
||||
0xe6: "PMC-Sierra",
|
||||
0x67: "Asparix",
|
||||
0x68: "Convex Computer",
|
||||
0xe9: "Quality Semiconductor",
|
||||
0xea: "Nimbus Technology",
|
||||
0x6b: "Transwitch",
|
||||
0xec: "Micronas (ITT Intermetall)",
|
||||
0x6d: "Cannon",
|
||||
0x6e: "Altera",
|
||||
0xef: "NEXCOM",
|
||||
0x70: "QUALCOMM",
|
||||
0xf1: "Sony",
|
||||
0xf2: "Cray Research",
|
||||
0x73: "AMS (Austria Micro)",
|
||||
0xf4: "Vitesse",
|
||||
0x75: "Aster Electronics",
|
||||
0x76: "Bay Networks (Synoptic)",
|
||||
0xf7: "Zentrum",
|
||||
0xf8: "TRW",
|
||||
0x79: "Thesys",
|
||||
0x7a: "Solbourne Computer",
|
||||
0xfb: "Allied-Signal",
|
||||
0x7c: "Dialog",
|
||||
0xfd: "Media Vision",
|
||||
0xfe: "Level One Communication",
|
||||
},
|
||||
{
|
||||
0x01: "Cirrus Logic",
|
||||
0x02: "National Instruments",
|
||||
0x83: "ILC Data Device",
|
||||
0x04: "Alcatel Mietec",
|
||||
0x85: "Micro Linear",
|
||||
0x86: "Univ. of NC",
|
||||
0x07: "JTAG Technologies",
|
||||
0x08: "Loral",
|
||||
0x89: "Nchip",
|
||||
0x8A: "Galileo Tech",
|
||||
0x0B: "Bestlink Systems",
|
||||
0x8C: "Graychip",
|
||||
0x0D: "GENNUM",
|
||||
0x0E: "VideoLogic",
|
||||
0x8F: "Robert Bosch",
|
||||
0x10: "Chip Express",
|
||||
0x91: "DATARAM",
|
||||
0x92: "United Microelec Corp.",
|
||||
0x13: "TCSI",
|
||||
0x94: "Smart Modular",
|
||||
0x15: "Hughes Aircraft",
|
||||
0x16: "Lanstar Semiconductor",
|
||||
0x97: "Qlogic",
|
||||
0x98: "Kingston",
|
||||
0x19: "Music Semi",
|
||||
0x1A: "Ericsson Components",
|
||||
0x9B: "SpaSE",
|
||||
0x1C: "Eon Silicon Devices",
|
||||
0x9D: "Programmable Micro Corp",
|
||||
0x9E: "DoD",
|
||||
0x1F: "Integ. Memories Tech.",
|
||||
0x20: "Corollary Inc.",
|
||||
0xA1: "Dallas Semiconductor",
|
||||
0xA2: "Omnivision",
|
||||
0x23: "EIV(Switzerland)",
|
||||
0xA4: "Novatel Wireless",
|
||||
0x25: "Zarlink (formerly Mitel)",
|
||||
0x26: "Clearpoint",
|
||||
0xA7: "Cabletron",
|
||||
0xA8: "Silicon Technology",
|
||||
0x29: "Vanguard",
|
||||
0x2A: "Hagiwara Sys-Com",
|
||||
0xAB: "Vantis",
|
||||
0x2C: "Celestica",
|
||||
0xAD: "Century",
|
||||
0xAE: "Hal Computers",
|
||||
0x2F: "Rohm Company Ltd.",
|
||||
0xB0: "Juniper Networks",
|
||||
0x31: "Libit Signal Processing",
|
||||
0x32: "Enhanced Memories Inc.",
|
||||
0xB3: "Tundra Semiconductor",
|
||||
0x34: "Adaptec Inc.",
|
||||
0xB5: "LightSpeed Semi.",
|
||||
0xB6: "ZSP Corp.",
|
||||
0x37: "AMIC Technology",
|
||||
0x38: "Adobe Systems",
|
||||
0xB9: "Dynachip",
|
||||
0xBA: "PNY Electronics",
|
||||
0x3B: "Newport Digital",
|
||||
0xBC: "MMC Networks",
|
||||
0x3D: "T Square",
|
||||
0x3E: "Seiko Epson",
|
||||
0xBF: "Broadcom",
|
||||
0x40: "Viking Components",
|
||||
0xC1: "V3 Semiconductor",
|
||||
0xC2: "Flextronics (formerly Orbit)",
|
||||
0x43: "Suwa Electronics",
|
||||
0xC4: "Transmeta",
|
||||
0x45: "Micron CMS",
|
||||
0x46: "American Computer & Digital Components Inc",
|
||||
0xC7: "Enhance 3000 Inc",
|
||||
0xC8: "Tower Semiconductor",
|
||||
0x49: "CPU Design",
|
||||
0x4A: "Price Point",
|
||||
0xCB: "Maxim Integrated Product",
|
||||
0x4C: "Tellabs",
|
||||
0xCD: "Centaur Technology",
|
||||
0xCE: "Unigen Corporation",
|
||||
0x4F: "Transcend Information",
|
||||
0xD0: "Memory Card Technology",
|
||||
0x51: "CKD Corporation Ltd.",
|
||||
0x52: "Capital Instruments, Inc.",
|
||||
0xD3: "Aica Kogyo, Ltd.",
|
||||
0x54: "Linvex Technology",
|
||||
0xD5: "MSC Vertriebs GmbH",
|
||||
0xD6: "AKM Company, Ltd.",
|
||||
0x57: "Dynamem, Inc.",
|
||||
0x58: "NERA ASA",
|
||||
0xD9: "GSI Technology",
|
||||
0xDA: "Dane-Elec (C Memory)",
|
||||
0x5B: "Acorn Computers",
|
||||
0xDC: "Lara Technology",
|
||||
0x5D: "Oak Technology, Inc.",
|
||||
0x5E: "Itec Memory",
|
||||
0xDF: "Tanisys Technology",
|
||||
0xE0: "Truevision",
|
||||
0x61: "Wintec Industries",
|
||||
0x62: "Super PC Memory",
|
||||
0xE3: "MGV Memory",
|
||||
0x64: "Galvantech",
|
||||
0xE5: "Gadzoox Nteworks",
|
||||
0xE6: "Multi Dimensional Cons.",
|
||||
0x67: "GateField",
|
||||
0x68: "Integrated Memory System",
|
||||
0xE9: "Triscend",
|
||||
0xEA: "XaQti",
|
||||
0x6B: "Goldenram",
|
||||
0xEC: "Clear Logic",
|
||||
0x6D: "Cimaron Communications",
|
||||
0x6E: "Nippon Steel Semi. Corp.",
|
||||
0xEF: "Advantage Memory",
|
||||
0x70: "AMCC",
|
||||
0xF1: "LeCroy",
|
||||
0xF2: "Yamaha Corporation",
|
||||
0x73: "Digital Microwave",
|
||||
0xF4: "NetLogic Microsystems",
|
||||
0x75: "MIMOS Semiconductor",
|
||||
0x76: "Advanced Fibre",
|
||||
0xF7: "BF Goodrich Data.",
|
||||
0xF8: "Epigram",
|
||||
0x79: "Acbel Polytech Inc.",
|
||||
0x7A: "Apacer Technology",
|
||||
0xFB: "Admor Memory",
|
||||
0x7C: "FOXCONN",
|
||||
0xFD: "Quadratics Superconductor",
|
||||
0xFE: "3COM",
|
||||
},
|
||||
{
|
||||
0x01: "Camintonn Corporation",
|
||||
0x02: "ISOA Incorporated",
|
||||
0x83: "Agate Semiconductor",
|
||||
0x04: "ADMtek Incorporated",
|
||||
0x85: "HYPERTEC",
|
||||
0x86: "Adhoc Technologies",
|
||||
0x07: "MOSAID Technologies",
|
||||
0x08: "Ardent Technologies",
|
||||
0x89: "Switchcore",
|
||||
0x8A: "Cisco Systems, Inc.",
|
||||
0x0B: "Allayer Technologies",
|
||||
0x8C: "WorkX AG",
|
||||
0x0D: "Oasis Semiconductor",
|
||||
0x0E: "Novanet Semiconductor",
|
||||
0x8F: "E-M Solutions",
|
||||
0x10: "Power General",
|
||||
0x91: "Advanced Hardware Arch.",
|
||||
0x92: "Inova Semiconductors GmbH",
|
||||
0x13: "Telocity",
|
||||
0x94: "Delkin Devices",
|
||||
0x15: "Symagery Microsystems",
|
||||
0x16: "C-Port Corporation",
|
||||
0x97: "SiberCore Technologies",
|
||||
0x98: "Southland Microsystems",
|
||||
0x19: "Malleable Technologies",
|
||||
0x1A: "Kendin Communications",
|
||||
0x9B: "Great Technology Microcomputer",
|
||||
0x1C: "Sanmina Corporation",
|
||||
0x9D: "HADCO Corporation",
|
||||
0x9E: "Corsair",
|
||||
0x1F: "Actrans System Inc.",
|
||||
0x20: "ALPHA Technologies",
|
||||
0xA1: "Cygnal Integrated Products Incorporated",
|
||||
0xA2: "Artesyn Technologies",
|
||||
0x23: "Align Manufacturing",
|
||||
0xA4: "Peregrine Semiconductor",
|
||||
0x25: "Chameleon Systems",
|
||||
0x26: "Aplus Flash Technology",
|
||||
0xA7: "MIPS Technologies",
|
||||
0xA8: "Chrysalis ITS",
|
||||
0x29: "ADTEC Corporation",
|
||||
0x2A: "Kentron Technologies",
|
||||
0xAB: "Win Technologies",
|
||||
0x2C: "ASIC Designs Inc",
|
||||
0xAD: "Extreme Packet Devices",
|
||||
0xAE: "RF Micro Devices",
|
||||
0x2F: "Siemens AG",
|
||||
0xB0: "Sarnoff Corporation",
|
||||
0x31: "Itautec Philco SA",
|
||||
0x32: "Radiata Inc.",
|
||||
0xB3: "Benchmark Elect. (AVEX)",
|
||||
0x34: "Legend",
|
||||
0xB5: "SpecTek Incorporated",
|
||||
0xB6: "Hi/fn",
|
||||
0x37: "Enikia Incorporated",
|
||||
0x38: "SwitchOn Networks",
|
||||
0xB9: "AANetcom Incorporated",
|
||||
0xBA: "Micro Memory Bank",
|
||||
0x3B: "ESS Technology",
|
||||
0xBC: "Virata Corporation",
|
||||
0x3D: "Excess Bandwidth",
|
||||
0x3E: "West Bay Semiconductor",
|
||||
0xBF: "DSP Group",
|
||||
0x40: "Newport Communications",
|
||||
0xC1: "Chip2Chip Incorporated",
|
||||
0xC2: "Phobos Corporation",
|
||||
0x43: "Intellitech Corporation",
|
||||
0xC4: "Nordic VLSI ASA",
|
||||
0x45: "Ishoni Networks",
|
||||
0x46: "Silicon Spice",
|
||||
0xC7: "Alchemy Semiconductor",
|
||||
0xC8: "Agilent Technologies",
|
||||
0x49: "Centillium Communications",
|
||||
0x4A: "W.L. Gore",
|
||||
0xCB: "HanBit Electronics",
|
||||
0x4C: "GlobeSpan",
|
||||
0xCD: "Element 14",
|
||||
0xCE: "Pycon",
|
||||
0x4F: "Saifun Semiconductors",
|
||||
0xD0: "Sibyte, Incorporated",
|
||||
0x51: "MetaLink Technologies",
|
||||
0x52: "Feiya Technology",
|
||||
0xD3: "I & C Technology",
|
||||
0x54: "Shikatronics",
|
||||
0xD5: "Elektrobit",
|
||||
0xD6: "Megic",
|
||||
0x57: "Com-Tier",
|
||||
0x58: "Malaysia Micro Solutions",
|
||||
0xD9: "Hyperchip",
|
||||
0xDA: "Gemstone Communications",
|
||||
0x5B: "Anadyne Microelectronics",
|
||||
0xDC: "3ParData",
|
||||
0x5D: "Mellanox Technologies",
|
||||
0x5E: "Tenx Technologies",
|
||||
0xDF: "Helix AG",
|
||||
0xE0: "Domosys",
|
||||
0x61: "Skyup Technology",
|
||||
0x62: "HiNT Corporation",
|
||||
0xE3: "Chiaro",
|
||||
0x64: "MCI Computer GMBH",
|
||||
0xE5: "Exbit Technology A/S",
|
||||
0xE6: "Integrated Technology Express",
|
||||
0x67: "AVED Memory",
|
||||
0x68: "Legerity",
|
||||
0xE9: "Jasmine Networks",
|
||||
0xEA: "Caspian Networks",
|
||||
0x6B: "nCUBE",
|
||||
0xEC: "Silicon Access Networks",
|
||||
0x6D: "FDK Corporation",
|
||||
0x6E: "High Bandwidth Access",
|
||||
0xEF: "MultiLink Technology",
|
||||
0x70: "BRECIS",
|
||||
0xF1: "World Wide Packets",
|
||||
0xF2: "APW",
|
||||
0x73: "Chicory Systems",
|
||||
0xF4: "Xstream Logic",
|
||||
0x75: "Fast-Chip",
|
||||
0x76: "Zucotto Wireless",
|
||||
0xF7: "Realchip",
|
||||
0xF8: "Galaxy Power",
|
||||
0x79: "eSilicon",
|
||||
0x7A: "Morphics Technology",
|
||||
0xFB: "Accelerant Networks",
|
||||
0x7C: "Silicon Wave",
|
||||
0xFD: "SandCraft",
|
||||
0xFE: "Elpida",
|
||||
},
|
||||
{
|
||||
0x01: "Solectron",
|
||||
0x02: "Optosys Technologies",
|
||||
0x83: "Buffalo (Formerly Melco)",
|
||||
0x04: "TriMedia Technologies",
|
||||
0x85: "Cyan Technologies",
|
||||
0x86: "Global Locate",
|
||||
0x07: "Optillion",
|
||||
0x08: "Terago Communications",
|
||||
0x89: "Ikanos Communications",
|
||||
0x8A: "Princeton Technology",
|
||||
0x0B: "Nanya Technology",
|
||||
0x8C: "Elite Flash Storage",
|
||||
0x0D: "Mysticom",
|
||||
0x0E: "LightSand Communications",
|
||||
0x8F: "ATI Technologies",
|
||||
0x10: "Agere Systems",
|
||||
0x91: "NeoMagic",
|
||||
0x92: "AuroraNetics",
|
||||
0x13: "Golden Empire",
|
||||
0x94: "Muskin",
|
||||
0x15: "Tioga Technologies",
|
||||
0x16: "Netlist",
|
||||
0x97: "TeraLogic",
|
||||
0x98: "Cicada Semiconductor",
|
||||
0x19: "Centon Electronics",
|
||||
0x1A: "Tyco Electronics",
|
||||
0x9B: "Magis Works",
|
||||
0x1C: "Zettacom",
|
||||
0x9D: "Cogency Semiconductor",
|
||||
0x9E: "Chipcon AS",
|
||||
0x1F: "Aspex Technology",
|
||||
0x20: "F5 Networks",
|
||||
0xA1: "Programmable Silicon Solutions",
|
||||
0xA2: "ChipWrights",
|
||||
0x23: "Acorn Networks",
|
||||
0xA4: "Quicklogic",
|
||||
0x25: "Kingmax Semiconductor",
|
||||
0x26: "BOPS",
|
||||
0xA7: "Flasys",
|
||||
0xA8: "BitBlitz Communications",
|
||||
0x29: "eMemory Technology",
|
||||
0x2A: "Procket Networks",
|
||||
0xAB: "Purple Ray",
|
||||
0x2C: "Trebia Networks",
|
||||
0xAD: "Delta Electronics",
|
||||
0xAE: "Onex Communications",
|
||||
0x2F: "Ample Communications",
|
||||
0xB0: "Memory Experts Intl",
|
||||
0x31: "Astute Networks",
|
||||
0x32: "Azanda Network Devices",
|
||||
0xB3: "Dibcom",
|
||||
0x34: "Tekmos",
|
||||
0xB5: "API NetWorks",
|
||||
0xB6: "Bay Microsystems",
|
||||
0x37: "Firecron Ltd",
|
||||
0x38: "Resonext Communications",
|
||||
0xB9: "Tachys Technologies",
|
||||
0xBA: "Equator Technology",
|
||||
0x3B: "Concept Computer",
|
||||
0xBC: "SILCOM",
|
||||
0x3D: "3Dlabs",
|
||||
0x3E: "ct Magazine",
|
||||
0xBF: "Sanera Systems",
|
||||
0x40: "Silicon Packets",
|
||||
0xC1: "Viasystems Group",
|
||||
0xC2: "Simtek",
|
||||
0x43: "Semicon Devices Singapore",
|
||||
0xC4: "Satron Handelsges",
|
||||
0x45: "Improv Systems",
|
||||
0x46: "INDUSYS GmbH",
|
||||
0xC7: "Corrent",
|
||||
0xC8: "Infrant Technologies",
|
||||
0x49: "Ritek Corp",
|
||||
0x4A: "empowerTel Networks",
|
||||
0xCB: "Hypertec",
|
||||
0x4C: "Cavium Networks",
|
||||
0xCD: "PLX Technology",
|
||||
0xCE: "Massana Design",
|
||||
0x4F: "Intrinsity",
|
||||
0xD0: "Valence Semiconductor",
|
||||
0x51: "Terawave Communications",
|
||||
0x52: "IceFyre Semiconductor",
|
||||
0xD3: "Primarion",
|
||||
0x54: "Picochip Designs Ltd",
|
||||
0xD5: "Silverback Systems",
|
||||
0xD6: "Jade Star Technologies",
|
||||
0x57: "Pijnenburg Securealink",
|
||||
0x58: "MemorySolutioN",
|
||||
0xD9: "Cambridge Silicon Radio",
|
||||
0xDA: "Swissbit",
|
||||
0x5B: "Nazomi Communications",
|
||||
0xDC: "eWave System",
|
||||
0x5D: "Rockwell Collins",
|
||||
0x5E: "PAION",
|
||||
0xDF: "Alphamosaic Ltd",
|
||||
0xE0: "Sandburst",
|
||||
0x61: "SiCon Video",
|
||||
0x62: "NanoAmp Solutions",
|
||||
0xE3: "Ericsson Technology",
|
||||
0x64: "PrairieComm",
|
||||
0xE5: "Mitac International",
|
||||
0xE6: "Layer N Networks",
|
||||
0x67: "Atsana Semiconductor",
|
||||
0x68: "Allegro Networks",
|
||||
0xE9: "Marvell Semiconductors",
|
||||
0xEA: "Netergy Microelectronic",
|
||||
0x6B: "NVIDIA",
|
||||
0xEC: "Internet Machines",
|
||||
0x6D: "Peak Electronics",
|
||||
0xEF: "Accton Technology",
|
||||
0x70: "Teradiant Networks",
|
||||
0xF1: "Europe Technologies",
|
||||
0xF2: "Cortina Systems",
|
||||
0x73: "RAM Components",
|
||||
0xF4: "Raqia Networks",
|
||||
0x75: "ClearSpeed",
|
||||
0x76: "Matsushita Battery",
|
||||
0xF7: "Xelerated",
|
||||
0xF8: "SimpleTech",
|
||||
0x79: "Utron Technology",
|
||||
0x7A: "Astec International",
|
||||
0xFB: "AVM gmbH",
|
||||
0x7C: "Redux Communications",
|
||||
0xFD: "Dot Hill Systems",
|
||||
0xFE: "TeraChip",
|
||||
},
|
||||
{
|
||||
0x01: "T-RAM Incorporated",
|
||||
0x02: "Innovics Wireless",
|
||||
0x83: "Teknovus",
|
||||
0x04: "KeyEye Communications",
|
||||
0x85: "Runcom Technologies",
|
||||
0x86: "RedSwitch",
|
||||
0x07: "Dotcast",
|
||||
0x08: "Silicon Mountain Memory",
|
||||
0x89: "Signia Technologies",
|
||||
0x8A: "Pixim",
|
||||
0x0B: "Galazar Networks",
|
||||
0x8C: "White Electronic Designs",
|
||||
0x0D: "Patriot Scientific",
|
||||
0x0E: "Neoaxiom Corporation",
|
||||
0x8F: "3Y Power Technology",
|
||||
0x10: "Europe Technologies",
|
||||
0x91: "Potentia Power Systems",
|
||||
0x92: "C-guys Incorporated",
|
||||
0x13: "Digital Communications Technology Incorporated",
|
||||
0x94: "Silicon-Based Technology",
|
||||
0x15: "Fulcrum Microsystems",
|
||||
0x16: "Positivo Informatica Ltd",
|
||||
0x97: "XIOtech Corporation",
|
||||
0x98: "PortalPlayer",
|
||||
0x19: "Zhiying Software",
|
||||
0x1A: "Direct2Data",
|
||||
0x9B: "Phonex Broadband",
|
||||
0x1C: "Skyworks Solutions",
|
||||
0x9D: "Entropic Communications",
|
||||
0x9E: "Pacific Force Technology",
|
||||
0x1F: "Zensys A/S",
|
||||
0x20: "Legend Silicon Corp.",
|
||||
0xA1: "sci-worx GmbH",
|
||||
0xA2: "Oasis Silicon Systems",
|
||||
0x23: "Renesas Technology",
|
||||
0xA4: "Raza Microelectronics",
|
||||
0x25: "Phyworks",
|
||||
0x26: "MediaTek",
|
||||
0xA7: "Non-cents Productions",
|
||||
0xA8: "US Modular",
|
||||
0x29: "Wintegra Ltd",
|
||||
0x2A: "Mathstar",
|
||||
0xAB: "StarCore",
|
||||
0x2C: "Oplus Technologies",
|
||||
0xAD: "Mindspeed",
|
||||
0xAE: "Just Young Computer",
|
||||
0x2F: "Radia Communications",
|
||||
0xB0: "OCZ",
|
||||
0x31: "Emuzed",
|
||||
0x32: "LOGIC Devices",
|
||||
0xB3: "Inphi Corporation",
|
||||
0x34: "Quake Technologies",
|
||||
0xB5: "Vixel",
|
||||
0xB6: "SolusTek",
|
||||
0x37: "Kongsberg Maritime",
|
||||
0x38: "Faraday Technology",
|
||||
0xB9: "Altium Ltd.",
|
||||
0xBA: "Insyte",
|
||||
0x3B: "ARM Ltd.",
|
||||
0xBC: "DigiVision",
|
||||
0x3D: "Vativ Technologies",
|
||||
0x3E: "Endicott Interconnect Technologies",
|
||||
0xBF: "Pericom",
|
||||
0x40: "Bandspeed",
|
||||
0xC1: "LeWiz Communications",
|
||||
0xC2: "CPU Technology",
|
||||
0x43: "Ramaxel Technology",
|
||||
0xC4: "DSP Group",
|
||||
0x45: "Axis Communications",
|
||||
0x46: "Legacy Electronics",
|
||||
0xC7: "Chrontel",
|
||||
0xC8: "Powerchip Semiconductor",
|
||||
0x49: "MobilEye Technologies",
|
||||
0x4A: "Excel Semiconductor",
|
||||
0xCB: "A-DATA Technology",
|
||||
0x4C: "VirtualDigm",
|
||||
},
|
||||
]
|
||||
|
||||
# SPD "DRAM Device Type" code (byte 2 of the SPD data) -> readable name.
# Only DDR3 (11), DDR4 (12) and DDR5 (0x12) get a full decode below.
memory_types = {
    1: "STD FPM DRAM",
    2: "EDO",
    3: "Pipelined Nibble",
    4: "SDRAM",
    5: "ROM",
    6: "DDR SGRAM",
    7: "DDR SDRAM",
    8: "DDR2 SDRAM",
    9: "DDR2 SDRAM FB-DIMM",
    10: "DDR2 SDRAM FB-DIMM PROBE",
    11: "DDR3 SDRAM",
    12: "DDR4 SDRAM",
    0x12: "DDR5 SDRAM",  # 18 decimal; intermediate codes have no entry here
}
|
||||
|
||||
# "Module Type" (low nibble of SPD byte 3 for DDR3/DDR4) -> form factor name
module_types = {
    1: "RDIMM",
    2: "UDIMM",
    3: "SODIMM",
    4: "Micro-DIMM",
    5: "Mini-RDIMM",
    6: "Mini-UDIMM",
}
|
||||
|
||||
# DDR3 SPD lookup tables.  The DDR4 decoder reuses these as well, since
# DDR4 keeps the same field encodings (see SPD._decode_ddr4).

# SDRAM capacity code (low nibble of SPD byte 4), consumed by the
# capacity_mb computation in the DDR3/DDR4 decoders
ddr3_module_capacity = {
    0: 256,
    1: 512,
    2: 1024,
    3: 2048,
    4: 4096,
    5: 8192,
    6: 16384,
    7: 32768,
}

# SDRAM device data width code (low 3 bits of SPD byte 7) -> bits
ddr3_dev_width = {
    0: 4,
    1: 8,
    2: 16,
    3: 32,
}

# rank count code (bits 3-5 of SPD byte 7) -> number of ranks
ddr3_ranks = {
    0: 1,
    1: 2,
    2: 3,
    3: 4
}

# primary bus width code (low 3 bits of SPD byte 8) -> bits
ddr3_bus_width = {
    0: 8,
    1: 16,
    2: 32,
    3: 64,
}
|
||||
|
||||
|
||||
def speed_from_clock(clock):
    """Derive a marketing-style data rate from a clock frequency.

    DDR transfers 8 words per clock for this purpose; the result is
    truncated down to the nearest multiple of 100 (e.g. 1866 -> 1800).

    :param clock: clock frequency in MHz
    :return: integer data rate in MT/s, floored to a multiple of 100
    """
    rate = clock * 8
    return int(rate - rate % 100)
|
||||
|
||||
|
||||
def decode_manufacturer(index, mfg):
    """Translate a JEDEC continuation-code pair to a manufacturer name.

    :param index: continuation (bank) byte; the parity bit is masked off
    :param mfg: manufacturer code within the selected bank
    :return: manufacturer name, or an 'Unknown (...)' placeholder when the
             pair is not present in the jedec_ids tables
    """
    bank = index & 0x7f  # strip the odd-parity bit from the bank index
    try:
        return jedec_ids[bank][mfg]
    except (KeyError, IndexError):
        return 'Unknown ({0}, {1})'.format(bank, mfg)
|
||||
|
||||
|
||||
def decode_spd_date(year, week):
    """Render the SPD manufacturing date bytes as 'YYYY-Www'.

    The SPD stores year and week as BCD bytes, hence the hex formatting.
    Both bytes zero means the field was never programmed.

    :param year: BCD year byte (two digits, 2000-based)
    :param week: BCD week byte
    :return: e.g. '2021-W15', or 'Unknown' when unprogrammed
    """
    if year or week:
        return '20{0:02x}-W{1:x}'.format(year, week)
    return 'Unknown'
|
||||
|
||||
|
||||
class SPD(object):
    """Parsed DIMM Serial Presence Detect (SPD) data.

    Decodes raw SPD bytes into the ``info`` dict with keys such as
    memory_type, module_type, speed, ecc, capacity_mb, manufacturer,
    manufacture_location, manufacture_date, serial, and model.
    """

    def __init__(self, bytedata):
        """Parsed memory information

        Parse bytedata input and provide a structured detail about the
        described memory component

        :param bytedata: A bytearray of data to decode
        :return:
        """
        self.rawdata = bytearray(bytedata)
        spd = self.rawdata
        # SPD byte 2 is the JEDEC "DRAM Device Type" code
        self.info = {'memory_type': memory_types.get(spd[2], 'Unknown')}
        if spd[2] == 11:  # DDR3
            self._decode_ddr3()
        elif spd[2] == 12:  # DDR4
            self._decode_ddr4()
        elif spd[2] == 0x12:  # ddr5
            self._decode_ddr5()

    def _decode_ddr3(self):
        """Populate ``self.info`` from a DDR3 SPD layout."""
        spd = self.rawdata
        # byte 9 packs the fine timebase: high nibble dividend over
        # low nibble divisor (picoseconds)
        finetime = (spd[9] >> 4) / (spd[9] & 0xf)
        fineoffset = spd[34]  # fine correction applied to tCKmin
        if fineoffset & 0b10000000:
            # Take two's complement for negative offset
            fineoffset = 0 - ((fineoffset ^ 0xff) + 1)
        fineoffset = (finetime * fineoffset) * 10 ** -3
        # medium timebase: byte 10 dividend over byte 11 divisor
        mtb = spd[10] / float(spd[11])
        # byte 12 is tCKmin in MTB units; DDR moves data twice per clock,
        # hence the factor of 2 when converting period to frequency
        clock = math.floor(2 / ((mtb * spd[12] + fineoffset) * 10 ** -3))
        self.info['speed'] = speed_from_clock(clock)
        # byte 8 bits 3-4: nonzero means extension (ECC) bits present
        self.info['ecc'] = (spd[8] & 0b11000) != 0
        self.info['module_type'] = module_types.get(spd[3] & 0xf, 'Unknown')
        sdramcap = ddr3_module_capacity[spd[4] & 0xf]
        buswidth = ddr3_bus_width[spd[8] & 0b111]
        sdramwidth = ddr3_dev_width[spd[7] & 0b111]
        ranks = ddr3_ranks[(spd[7] & 0b111000) >> 3]
        # capacity formula: per-device capacity scaled by bus/device width
        # ratio and rank count
        self.info['capacity_mb'] = sdramcap / 8 * buswidth / sdramwidth * ranks
        self.info['manufacturer'] = decode_manufacturer(spd[117], spd[118])
        self.info['manufacture_location'] = spd[119]
        self.info['manufacture_date'] = decode_spd_date(spd[120], spd[121])
        # serial number rendered as 8 zero-padded hex digits
        self.info['serial'] = hex(struct.unpack(
            '>I', struct.pack('4B', *spd[122:126]))[0])[2:].rjust(8, '0')
        # part number bytes, stripped of NUL/0xFF/space padding
        self.info['model'] = struct.pack('20B', *spd[128:148]).strip(
            b'\x00\xff ')

    def _decode_ddr5(self):
        """Populate ``self.info`` from a DDR5 SPD layout."""
        spd = self.rawdata
        # DDR5 module type codes (byte 3)
        modtypes = {
            1: 'RDIMM',
            2: 'UDIMM',
            3: 'SODIMM',
        }
        # per-die density codes -> Gbit
        sdramdensities = {
            1: 4,
            2: 8,
            3: 12,
            4: 16,
            5: 24,
            6: 32,
            7: 48,
            8: 64,
        }
        # dies-per-package codes
        ddp = {
            0: 1,
            1: 2,
            2: 2,
            3: 4,
            4: 8,
            6: 16,
        }
        self.info['module_type'] = modtypes.get(
            spd[3], 'Unknown')
        self.info['manufacturer'] = decode_manufacturer(spd[512], spd[513])
        self.info['model'] = struct.pack('30B', *spd[521:551]).strip(
            b'\x00\xff ')
        self.info['serial'] = hex(struct.unpack(
            '>I', struct.pack('4B', *spd[517:521]))[0])[2:].rjust(8, '0')
        self.info['manufacture_date'] = decode_spd_date(spd[515], spd[516])
        self.info['manufacture_location'] = spd[514]
        self.info['ecc'] = (spd[235] & 0b11000) != 0
        if spd[19] == 0:
            # bytes 20-21: minimum clock cycle time, little endian
            tckmin = struct.unpack('<H', spd[20:22])[0]
            self.info['speed'] = math.floor(160000.0 / tckmin) * 100
        else:
            self.info['speed'] = 'Unknown'
        asymmetric = bool(spd[234] & 64)
        numrankspersubchannel = ((spd[234] & 56) >> 3) + 1
        subchannels = ((spd[235] & 0b01100000) >> 5) + 1
        buswidthpersubchannel = 2 ** ((spd[235] & 0b111) + 3)
        # these bits are either all, or for half the ranks in asymettric
        densityperdie = spd[4]  # NOTE(review): dead store, overwritten below
        sdramiowidth = 2**((spd[6] >> 5) + 2)
        densityperdie = sdramdensities.get(spd[4] & 0b11111, 0)
        diesperpackage = ddp.get(spd[4] >> 5, 1)
        capacity = (subchannels
                    * (buswidthpersubchannel / sdramiowidth)
                    * diesperpackage * densityperdie / 8
                    * numrankspersubchannel)
        if asymmetric:
            # redo the computation using the odd-rank fields (bytes 8/10)
            capacity = capacity // 2  # the calculation is halved to make room for the odd ranks
            densityperdie = spd[8]  # NOTE(review): dead store, overwritten below
            sdramiowidth = 2**((spd[10] >> 5) + 2)
            densityperdie = sdramdensities.get(spd[8] & 0b11111, 0)
            diesperpackage = ddp.get(spd[8] >> 5, 1)
            oddcapacity = (subchannels
                          * (buswidthpersubchannel / sdramiowidth)
                          * diesperpackage * densityperdie / 8
                          * numrankspersubchannel)
            oddcapacity = oddcapacity // 2  # this is halved, since this is only half the ranks
            capacity += oddcapacity
        # density was in Gbit, so scale to MB
        self.info['capacity_mb'] = capacity * 1024

    def _decode_ddr4(self):
        """Populate ``self.info`` from a DDR4 SPD layout."""
        spd = self.rawdata
        if spd[17] == 0:
            # timebase code 0: 0.125 ns medium timebase, 1 ps fine timebase
            fineoffset = spd[125]
            if fineoffset & 0b10000000:
                # two's complement for negative fine offset
                fineoffset = 0 - ((fineoffset ^ 0xff) + 1)
            clock = math.floor(
                2 / ((0.125 * spd[18] + fineoffset * 0.001) * 0.001))
            self.info['speed'] = speed_from_clock(clock)
        else:
            self.info['speed'] = 'Unknown'
        self.info['ecc'] = (spd[13] & 0b11000) == 0b1000
        self.info['module_type'] = module_types.get(spd[3] & 0xf,
                                                    'Unknown')
        # DDR4 reuses the DDR3 field encodings for these tables
        sdramcap = ddr3_module_capacity[spd[4] & 0xf]
        buswidth = ddr3_bus_width[spd[13] & 0b111]
        sdramwidth = ddr3_dev_width[spd[12] & 0b111]
        ranks = ddr3_ranks[(spd[12] & 0b111000) >> 3]
        if spd[6] & 0b11 == 0b10:
            # multi-die package signal in byte 6: scale ranks by the
            # encoded die count (bits 4-6)
            ranks = ranks * (((spd[6] >> 4) & 0b111) + 1)
        self.info['capacity_mb'] = sdramcap / 8 * buswidth / sdramwidth * ranks
        self.info['manufacturer'] = decode_manufacturer(spd[320], spd[321])
        self.info['manufacture_location'] = spd[322]
        self.info['manufacture_date'] = decode_spd_date(spd[323], spd[324])
        self.info['serial'] = hex(struct.unpack(
            '>I', struct.pack('4B', *spd[325:329]))[0])[2:].rjust(8, '0')
        self.info['model'] = struct.pack('20B', *spd[329:349]).strip(
            b'\x00\xff ')
|
||||
134
confluent_server/aiohmi/ipmi/private/util.py
Normal file
134
confluent_server/aiohmi/ipmi/private/util.py
Normal file
@@ -0,0 +1,134 @@
|
||||
# Copyright 2015-2017 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import ctypes
|
||||
import functools
|
||||
import os
|
||||
import socket
|
||||
import struct
|
||||
|
||||
from aiohmi.ipmi.private import constants
|
||||
|
||||
# Python 2/3 compatibility shims
try:
    range = xrange  # Python 2: use the lazy form of range
except NameError:
    pass  # Python 3: range is already lazy
try:
    buffer  # Python 2 only builtin
except NameError:
    buffer = memoryview  # Python 3: memoryview fills the same role here


# On Windows, GetTickCount64 supplies a monotonic millisecond counter;
# wintime stays None elsewhere (consumed by _monotonic_time below)
wintime = None
try:
    wintime = ctypes.windll.kernel32.GetTickCount64
except AttributeError:
    pass  # not Windows: ctypes exposes no windll
|
||||
|
||||
|
||||
def decode_wireformat_uuid(rawguid, bigendian=False):
    """Decode a wire format UUID

    It handles the rather particular scheme where half is little endian
    and half is big endian. It returns a string like dmidecode would output.

    :param rawguid: 16 bytes of raw UUID data (bytes, bytearray, or a
                    list of integers)
    :param bigendian: decode the leading three fields as big endian
                      rather than the default little endian
    :return: canonical 8-4-4-4-12 hex string, upper case
    """
    if isinstance(rawguid, list):
        rawguid = bytearray(rawguid)
    # the first three fields are little endian on the wire unless the
    # caller indicates otherwise; the rest is always big endian
    timefields = '>IHH' if bigendian else '<IHH'
    # struct.unpack_from accepts any buffer-protocol object directly, so
    # the legacy buffer()/memoryview wrapping (and the slice copies) are
    # unnecessary
    lebytes = struct.unpack_from(timefields, rawguid, 0)
    bebytes = struct.unpack_from('>HHI', rawguid, 8)
    return '{0:08X}-{1:04X}-{2:04X}-{3:04X}-{4:04X}{5:08X}'.format(
        lebytes[0], lebytes[1], lebytes[2], bebytes[0], bebytes[1], bebytes[2])
|
||||
|
||||
|
||||
def urlsplit(url):
    """Split an arbitrary url into protocol, host, rest

    The standard urlsplit does not want to provide 'netloc' for arbitrary
    protocols, this works around that.

    :param url: The url to split into component parts
    :return: (protocol, host, path) tuple; host is '' when the url has no
             '//' netloc, and path is '/' when the netloc ends the url
    """
    proto, rest = url.split(':', 1)
    host = ''
    if rest[:2] == '//':
        # partition tolerates a bare netloc with no trailing path
        # (e.g. 'http://host'), where split('/', 1) would raise ValueError
        host, _, rest = rest[2:].partition('/')
        rest = '/' + rest
    return proto, host, rest
|
||||
|
||||
|
||||
def get_ipv4(hostname):
    """Get list of ipv4 addresses for hostname

    :param hostname: hostname or address literal to resolve
    :return: list of IPv4 address strings
    """
    addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET,
                                  socket.SOCK_STREAM)
    # each getaddrinfo entry is (family, type, proto, canonname, sockaddr);
    # sockaddr[0] is the address string
    return [info[4][0] for info in addrinfo]
|
||||
|
||||
|
||||
def get_ipmi_error(response, suffix=""):
    """Render an IPMI response as an error string, or False on success.

    :param response: parsed response dict carrying either 'error' or
                     'code'/'command'/'netfn'
    :param suffix: text appended to any returned error string
    :return: False when the completion code indicates success, otherwise
             a human readable error string
    """
    if 'error' in response:
        # the transport layer already produced a textual error
        return response['error'] + suffix
    code = response['code']
    if code == 0:
        return False
    command = response['command']
    netfn = response['netfn']
    if ((netfn, command) in constants.command_completion_codes
            and code in constants.command_completion_codes[(netfn, command)]):
        # command-specific completion code
        res = constants.command_completion_codes[(netfn, command)][code]
        res += suffix
    elif code in constants.ipmi_completion_codes:
        # generic IPMI completion code
        res = constants.ipmi_completion_codes[code] + suffix
    else:
        # %02x zero-pads the byte; the original %2x space-padded,
        # yielding strings like 'Unknown code 0x 5 encountered'
        res = "Unknown code 0x%02x encountered" % code
    return res
|
||||
|
||||
|
||||
def _monotonic_time():
    """Provides a monotonic timer

    This code is concerned with relative, not absolute time.
    This function facilitates that prior to python 3.3
    """
    # Windows: GetTickCount64 returns milliseconds since boot;
    # elsewhere os.times()[4] gives elapsed real time in seconds
    return wintime() / 1000.0 if wintime else os.times()[4]
|
||||
|
||||
|
||||
class protect(object):
    """Guard a critical section with the supplied lock.

    Usable two ways: as a decorator, serializing every call of the wrapped
    function, or as a context manager around an inline block.  The lock
    may be any object exposing acquire()/release().
    """

    def __init__(self, lock):
        self.lock = lock

    def __call__(self, func):
        @functools.wraps(func)
        def _locked_call(*args, **kwargs):
            # hold the lock for the duration of the wrapped call,
            # releasing even if it raises
            self.lock.acquire()
            try:
                return func(*args, **kwargs)
            finally:
                self.lock.release()
        return _locked_call

    def __enter__(self):
        self.lock.acquire()

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
|
||||
863
confluent_server/aiohmi/ipmi/sdr.py
Normal file
863
confluent_server/aiohmi/ipmi/sdr.py
Normal file
@@ -0,0 +1,863 @@
|
||||
# coding=utf8
|
||||
# Copyright 2014 IBM Corporation
|
||||
# Copyright 2015 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""This module provides access to SDR offered by a BMC
|
||||
|
||||
This data is common between 'sensors' and 'inventory' modules since SDR
|
||||
is both used to enumerate sensors for sensor commands and FRU ids for FRU
|
||||
commands
|
||||
|
||||
For now, we will not offer persistent SDR caching as we do in xCAT's IPMI
|
||||
code. Will see if it is adequate to advocate for high object reuse in a
|
||||
persistent process for the moment.
|
||||
|
||||
Focus is at least initially on the aspects that make the most sense for a
|
||||
remote client to care about. For example, smbus information is being
|
||||
skipped for now
|
||||
"""
|
||||
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
import string
|
||||
import struct
|
||||
import weakref
|
||||
|
||||
|
||||
import aiohmi.constants as const
|
||||
import aiohmi.exceptions as exc
|
||||
|
||||
|
||||
# SDR record classification used throughout this module
TYPE_UNKNOWN = 0  # record type not understood / not applicable
TYPE_SENSOR = 1   # sensor record
TYPE_FRU = 2      # FRU locator record

# module-level cache for SDR data reuse (populated elsewhere in this
# module; contents not shown here)
shared_sdrs = {}
|
||||
|
||||
|
||||
# OEM sensor event decode table, keyed outermost to innermost by IPMI
# manufacturer id, product id, then two OEM-specific type bytes, and
# finally the event offset.  Leaves carry display text and a health
# severity.  (Inner key semantics inferred from the single Intel entry
# below -- confirm against the consuming code when extending.)
oem_type_offsets = {
    343: {  # Intel
        149: {  # Cascade Lake-AP
            0x7a: {
                0xda: {
                    3: {
                        'desc': 'Allowed',
                        'severity': const.Health.Ok,
                    },
                    4: {
                        'desc': 'Restricted',
                        'severity': const.Health.Ok,
                    },
                    5: {
                        'desc': 'Disabled',
                        'severity': const.Health.Ok,
                    },
                },
            },
        },
    },
}
|
||||
|
||||
|
||||
def ones_complement(value, bits):
    """Interpret a raw bit field of the given width as ones' complement.

    In ones' complement a negative number is stored with every bit
    inverted, so a set sign bit means: invert within the field width and
    negate.

    :param value: the raw unsigned field value
    :param bits: field width in bits
    :return: the signed integer value
    """
    mask = (0b1 << bits) - 1
    sign = 0b1 << (bits - 1)
    if value & sign:
        return -(value ^ mask)
    return value
|
||||
|
||||
|
||||
def twos_complement(value, bits):
    """Interpret a raw bit field of the given width as two's complement.

    A set sign bit means the stored value is negative: subtract one and
    invert within the field width, then negate.  Helps with the large
    amount of two's complement data in the IPMI spec.

    :param value: the raw unsigned field value
    :param bits: field width in bits
    :return: the signed integer value
    """
    mask = (0b1 << bits) - 1
    sign = 0b1 << (bits - 1)
    if value & sign:
        return -((value - 1) ^ mask)
    return value
|
||||
|
||||
|
||||
# Sensor unit code -> display suffix for numeric readings.
# Code 59 is intentionally absent (no entry defined in this table).
unit_types = {
    # table 43-15 'sensor unit type codes'
    0: '',
    1: '°C',
    2: '°F',
    3: 'K',
    4: 'V',
    5: 'A',
    6: 'W',
    7: 'J',
    8: 'C',
    9: 'VA',
    10: 'nt',
    11: 'lm',
    12: 'lx',
    13: 'cd',
    14: 'kPa',
    15: 'PSI',
    16: 'N',
    17: 'CFM',
    18: 'RPM',
    19: 'Hz',
    20: 'μs',
    21: 'ms',
    22: 's',
    23: 'min',
    24: 'hr',
    25: 'd',
    26: 'week(s)',
    27: 'mil',
    28: 'inches',
    29: 'ft',
    30: 'cu in',
    31: 'cu feet',
    32: 'mm',
    33: 'cm',
    34: 'm',
    35: 'cu cm',
    36: 'cu m',
    37: 'L',
    38: 'fl. oz.',
    39: 'radians',
    40: 'steradians',
    41: 'revolutions',
    42: 'cycles',
    43: 'g',
    44: 'ounce',
    45: 'lb',
    46: 'ft-lb',
    47: 'oz-in',
    48: 'gauss',
    49: 'gilberts',
    50: 'henry',
    51: 'millihenry',
    52: 'farad',
    53: 'microfarad',
    54: 'ohms',
    55: 'siemens',
    56: 'mole',
    57: 'becquerel',
    58: 'ppm',
    60: 'dB',
    61: 'dBA',
    62: 'dBC',
    63: 'Gy',
    64: 'sievert',
    65: 'color temp deg K',
    66: 'bit',
    67: 'kb',
    68: 'mb',
    69: 'gb',
    70: 'byte',
    71: 'kB',
    72: 'mB',
    73: 'gB',
    74: 'word',
    75: 'dword',
    76: 'qword',
    77: 'line',
    78: 'hit',
    79: 'miss',
    80: 'retry',
    81: 'reset',
    82: 'overrun/overflow',
    83: 'underrun',
    84: 'collision',
    85: 'packets',
    86: 'messages',
    87: 'characters',
    88: 'error',
    89: 'uncorrectable error',
    90: 'correctable error',
    91: 'fatal error',
    92: 'grams',
}

# Rate-unit modifier appended after the unit suffix (e.g. 'W per hour')
sensor_rates = {
    0: '',
    1: ' per us',
    2: ' per ms',
    3: ' per s',
    4: ' per minute',
    5: ' per hour',
    6: ' per day',
}
|
||||
|
||||
|
||||
class SensorReading(object):
    """Representation of the state of a sensor.

    It is initialized by aiohmi internally, it does not make sense for
    a developer to create one of these objects directly.

    It provides the following properties:
    name: UTF-8 string describing the sensor
    units: UTF-8 string describing the units of the sensor (if numeric)
    value: Value of the sensor if numeric
    imprecision: The amount by which the actual measured value may deviate from
    'value' due to limitations in the resolution of the given sensor.
    """

    def __init__(self, reading, suffix):
        """Build a reading from the raw decoded dict.

        :param reading: dict with at least 'type' and 'name'; numeric
                        readings additionally carry 'health', 'states',
                        'state_ids', 'value', and 'imprecision'
        :param suffix: unit string to attach to the reading
        """
        self.broken_sensor_ids = {}
        self.health = const.Health.Ok
        self.type = reading['type']
        self.value = None
        self.imprecision = None
        self.states = []
        self.state_ids = []
        self.unavailable = 0
        try:
            # readings without the numeric fields stop partway through
            # this sequence; the defaults assigned above then stand
            self.health = reading['health']
            self.states = reading['states']
            self.state_ids = reading['state_ids']
            self.value = reading['value']
            self.imprecision = reading['imprecision']
        except KeyError:
            pass
        if 'unavailable' in reading:
            self.unavailable = 1
        self.units = suffix
        self.name = reading['name']

    def __repr__(self):
        return repr({
            'value': self.value,
            'states': self.states,
            'state_ids': self.state_ids,
            'units': self.units,
            'imprecision': self.imprecision,
            'name': self.name,
            'type': self.type,
            'unavailable': self.unavailable,
            'health': self.health
        })

    def simplestring(self):
        """Return a summary string of the reading.

        This is intended as a sampling of how the data could be presented by
        a UI. It's intended to help a developer understand the relation
        between the attributes of a sensor reading if it is not quite clear
        """
        # accumulate into 'summary'; the previous local was named 'repr',
        # shadowing the builtin
        summary = self.name + ": "
        if self.value is not None:
            summary += str(self.value)
            summary += " ± " + str(self.imprecision)
            summary += self.units
        for state in self.states:
            summary += state + ","
        # severity thresholds escalate; report only the worst applicable
        if self.health >= const.Health.Failed:
            summary += '(Failed)'
        elif self.health >= const.Health.Critical:
            summary += '(Critical)'
        elif self.health >= const.Health.Warning:
            summary += '(Warning)'
        return summary
||||
|
||||
|
||||
class SDREntry(object):
|
||||
"""Represent a single entry in the IPMI SDR.
|
||||
|
||||
This is created and consumed by aiohmi internally, there is no reason for
|
||||
external code to pay attention to this class.
|
||||
"""
|
||||
|
||||
    def __init__(self, entrybytes, event_consts, reportunsupported=False,
                 mfg_id=0, prod_id=0):
        """Parse one raw SDR record.

        :param entrybytes: raw record bytes, including the 5 byte header
        :param event_consts: constants object mapping event/sensor codes
        :param reportunsupported: raise on unrecognized record types instead
                                  of silently marking them TYPE_UNKNOWN
        :param mfg_id: IANA manufacturer id, used for OEM sensor decoding
        :param prod_id: product id, used for OEM sensor decoding
        """
        self.mfg_id = mfg_id
        self.prod_id = prod_id
        self.event_consts = event_consts
        # ignore record id for now, we only care about the sensor number for
        # the moment
        self.readable = True
        self.reportunsupported = reportunsupported
        if entrybytes[2] != 0x51:
            # only recognize '1.5', the only version defined at time of
            # writing
            raise NotImplementedError
        self.rectype = entrybytes[3]
        self.linearization = None
        # most important to get going are 1, 2, and 11
        self.sdrtype = TYPE_SENSOR  # assume a sensor
        if self.rectype == 1:  # full sdr
            self.full_decode(entrybytes[5:])
        elif self.rectype == 2:  # compact sdr
            self.compact_decode(entrybytes[5:])
        elif self.rectype == 3:  # event only
            self.eventonly_decode(entrybytes[5:])
        elif self.rectype == 8:  # entity association
            self.association_decode(entrybytes[5:])
        elif self.rectype == 0x11:  # FRU locator
            self.fru_decode(entrybytes[5:])
        elif self.rectype == 0x12:  # Management controller
            self.mclocate_decode(entrybytes[5:])
        elif self.rectype == 0xc0:  # OEM format
            self.sdrtype = TYPE_UNKNOWN  # assume undefined
            self.oem_decode(entrybytes[5:])
        elif self.reportunsupported:
            raise NotImplementedError
        else:
            self.sdrtype = TYPE_UNKNOWN
|
||||
|
||||
    @property
    def name(self):
        """Human readable name of this record (sensor or FRU), if known."""
        if self.sdrtype == TYPE_SENSOR:
            return self.sensor_name
        elif self.sdrtype == TYPE_FRU:
            return self.fru_name
        else:
            return "UNKNOWN"
|
||||
|
||||
    def oem_decode(self, entry):
        """Handle an OEM (0xc0) record; currently only extracts the mfg id."""
        mfgid = entry[0] + (entry[1] << 8) + (entry[2] << 16)
        if self.reportunsupported:
            raise NotImplementedError("No support for mfgid %X" % mfgid)
|
||||
|
||||
def mclocate_decode(self, entry):
|
||||
# For now, we don't have use for MC locator records
|
||||
# we'll ignore them at the moment
|
||||
self.sdrtype = TYPE_UNKNOWN
|
||||
pass
|
||||
|
||||
    def fru_decode(self, entry):
        """Decode a FRU Device Locator record (table 43-7)."""
        self.sdrtype = TYPE_FRU
        self.fru_name = self.tlv_decode(entry[10], entry[11:])
        self.fru_number = entry[1]
        self.fru_logical = (entry[2] & 0b10000000) == 0b10000000
        # 0x8 to 0x10.. 0 unspecified except on 0x10, 1 is dimm
        self.fru_type_and_modifier = (entry[5] << 8) + entry[6]
|
||||
|
||||
    def association_decode(self, entry):
        """Decode an Entity Association record (table 43-4).

        Not yet represented; the record is marked unknown for now.
        """
        # TODO(jbjohnso): actually represent this data
        self.sdrtype = TYPE_UNKNOWN
|
||||
|
||||
    def eventonly_decode(self, entry):
        """Decode an event-only sensor record (table 43-3)."""
        self._common_decode(entry)
        self.sensor_name = self.tlv_decode(entry[11], entry[12:])
        # event-only sensors cannot be read on demand
        self.readable = False
|
||||
|
||||
    def compact_decode(self, entry):
        """Decode a compact sensor record (table 43-2)."""
        self._common_decode(entry)
        self.sensor_name = self.tlv_decode(entry[26], entry[27:])
|
||||
|
||||
def assert_trap_value(self, offset):
|
||||
trapval = (self.sensor_type_number << 16) + (self.reading_type << 8)
|
||||
return trapval + offset
|
||||
|
||||
    def _common_decode(self, entry):
        """Decode fields shared by event-only, compact and full records.

        Offsets are the spec table offsets minus 6 (record header already
        consumed).
        """
        self.has_thresholds = False
        self.sensor_owner = entry[0]
        self.sensor_lun = entry[1] & 0x03
        self.sensor_number = entry[2]
        self.entity = self.event_consts.entity_ids.get(
            entry[3], 'Unknown entity {0}'.format(entry[3]))
        if self.rectype == 3:
            self.sensor_type_number = entry[5]
            self.reading_type = entry[6]  # table 42-1
        else:
            self.sensor_type_number = entry[7]
            self.reading_type = entry[8]  # table 42-1
        if self.rectype == 1 and entry[6] & 0b00001100:
            self.has_thresholds = True
        try:
            self.sensor_type = self.event_consts.sensor_type_codes[
                self.sensor_type_number]
        except KeyError:
            self.sensor_type = "UNKNOWN type " + str(self.sensor_type_number)
        if self.rectype == 3:
            return
        # remaining fields exist only in full/compact records
        # reading_type values:
        # 0: unspecified
        # 1: generic threshold based
        # 0x6f: discrete sensor-specific from table 42-3, sensor offsets
        # all others per table 42-2, generic discrete
        # numeric format is one of:
        # 0 - unsigned, 1 - 1s complement, 2 - 2s complement, 3 - ignore
        # number.  compact records are supposed to always write it as '3',
        # presumably to allow for the concept of a compact record with a
        # numeric format even though numerics are not allowed today.  Some
        # implementations violate the spec and do something other than 3
        # today.  Tolerate the violation under the assumption that things are
        # not so hard up that there will ever be a need for compact sensors
        # supporting numeric values
        if self.rectype == 2:
            self.numeric_format = 3
        else:
            self.numeric_format = (entry[15] & 0b11000000) >> 6
        self.sensor_rate = sensor_rates[(entry[15] & 0b111000) >> 3]
        self.unit_mod = ""
        if (entry[15] & 0b110) == 0b10:  # unit1 by unit2
            self.unit_mod = "/"
        elif (entry[15] & 0b110) == 0b100:
            # combine the units by multiplying, SI nomenclature is either
            # space or hyphen, so go with space
            self.unit_mod = " "
        self.percent = ''
        if entry[15] & 1 == 1:
            self.percent = '% '
        if self.sensor_type_number == 0xb:
            # disambiguate sensor type 0xb into Power/Energy by units
            if self.unit_mod == '':
                if entry[16] == 6:  # base unit is Watts
                    self.sensor_type = 'Power'
            elif self.unit_mod == ' ':
                if entry[16] == 6 and entry[17] in (22, 23, 24):
                    self.sensor_type = 'Energy'
        self.baseunit = unit_types[entry[16]]
        self.modunit = unit_types[entry[17]]
        self.unit_suffix = self.percent + self.baseunit + self.unit_mod + \
            self.modunit
|
||||
|
||||
    def full_decode(self, entry):
        """Decode a full sensor record (table 43-1)."""
        # offsets are table from spec, minus 6
        # TODO(jbjohnso): table 43-13, put in constants to interpret entry[3]
        self._common_decode(entry)
        # now must extract the formula data to transform values,
        # entry[18] to entry[24].
        # if not linear, must use get sensor reading factors
        # TODO(jbjohnso): the various other values
        self.sensor_name = self.tlv_decode(entry[42], entry[43:])
        self.linearization = entry[18] & 0b1111111
        if self.linearization <= 11:
            # the enumeration of linear sensors goes to 11,
            # static formula parameters are applicable, decode them.
            # if 0x70, then the sensor reading will have to get the
            # factors on the fly.
            # the formula could apply if we bother with nominal
            # reading interpretation
            self.decode_formula(entry[19:25])
|
||||
|
||||
    def _decode_state(self, state):
        """Map an asserted state bit offset to (description, severity).

        Falls back to a generic description with Ok severity when the
        offset is not recognized for this sensor/reading type.
        """
        mapping = self.event_consts.generic_type_offsets
        try:
            if self.reading_type in mapping:
                desc = mapping[self.reading_type][state]['desc']
                health = mapping[self.reading_type][state]['severity']
            elif self.reading_type == 0x6f:
                # sensor-specific discrete offsets
                mapping = self.event_consts.sensor_type_offsets
                desc = mapping[self.sensor_type_number][state]['desc']
                health = mapping[self.sensor_type_number][state]['severity']
            elif self.reading_type >= 0x70 and self.reading_type <= 0x7f:
                # OEM reading types, keyed by manufacturer/product id
                sensedata = oem_type_offsets[self.mfg_id][self.prod_id][
                    self.reading_type][self.sensor_type_number][state]
                desc = sensedata['desc']
                health = sensedata['severity']
            else:
                desc = "Unknown state %d" % state
                health = const.Health.Ok
        except KeyError:
            desc = "Unknown state %d for reading type %d/sensor type %d" % (
                state, self.reading_type, self.sensor_type_number)
            health = const.Health.Ok
        return desc, health
|
||||
|
||||
    def decode_sensor_reading(self, ipmicmd, reading):
        """Turn raw 'get sensor reading' response data into a SensorReading.

        :param ipmicmd: command object, used only when on-demand conversion
                        factors must be fetched for non-linear sensors
        :param reading: raw response data bytes from the get sensor reading
                        command
        """
        numeric = None
        output = {
            'name': self.sensor_name,
            'type': self.sensor_type,
            'id': self.sensor_number,
        }
        # bit 5 set: reading unavailable; bit 6 clear: scanning disabled
        if reading[1] & 0b100000 or not reading[1] & 0b1000000:
            output['unavailable'] = 1
            return SensorReading(output, self.unit_suffix)
        if self.numeric_format == 2:
            numeric = twos_complement(reading[0], 8)
        elif self.numeric_format == 1:
            numeric = ones_complement(reading[0], 8)
        elif self.numeric_format == 0 and (self.has_thresholds or self.reading_type == 1):
            numeric = reading[0]
        discrete = True
        if numeric is not None:
            # report the middle of the tolerance band as the value and
            # the distance to the band edge as the imprecision
            lowerbound = numeric - (0.5 + (self.tolerance / 2.0))
            upperbound = numeric + (0.5 + (self.tolerance / 2.0))
            lowerbound = self.decode_value(ipmicmd, lowerbound)
            upperbound = self.decode_value(ipmicmd, upperbound)
            output['value'] = (lowerbound + upperbound) / 2.0
            output['imprecision'] = output['value'] - lowerbound
            discrete = False
        upper = 'upper'
        lower = 'lower'
        if self.linearization == 7:
            # if the formula is 1/x, then the intuitive sense of upper and
            # lower are backwards
            upper = 'lower'
            lower = 'upper'
        output['states'] = []
        output['state_ids'] = []
        output['health'] = const.Health.Ok
        if discrete:
            # each asserted bit maps to a state description/severity
            for state in range(8):
                if reading[2] & (0b1 << state):
                    statedesc, health = self._decode_state(state)
                    output['health'] |= health
                    output['states'].append(statedesc)
                    output['state_ids'].append(self.assert_trap_value(state))
            if len(reading) > 3:
                for state in range(7):
                    if reading[3] & (0b1 << state):
                        statedesc, health = self._decode_state(state + 8)
                        output['health'] |= health
                        output['states'].append(statedesc)
                        output['state_ids'].append(
                            self.assert_trap_value(state + 8))
        else:
            # threshold sensor: decode asserted threshold comparison bits
            if reading[2] & 0b1:
                output['health'] |= const.Health.Warning
                output['states'].append(lower + " non-critical threshold")
                output['state_ids'].append(self.assert_trap_value(1))
            if reading[2] & 0b10:
                output['health'] |= const.Health.Critical
                output['states'].append(lower + " critical threshold")
                output['state_ids'].append(self.assert_trap_value(2))
            if reading[2] & 0b100:
                output['health'] |= const.Health.Failed
                output['states'].append(lower + " non-recoverable threshold")
                output['state_ids'].append(self.assert_trap_value(3))
            if reading[2] & 0b1000:
                output['health'] |= const.Health.Warning
                output['states'].append(upper + " non-critical threshold")
                output['state_ids'].append(self.assert_trap_value(4))
            if reading[2] & 0b10000:
                output['health'] |= const.Health.Critical
                output['states'].append(upper + " critical threshold")
                output['state_ids'].append(self.assert_trap_value(5))
            if reading[2] & 0b100000:
                output['health'] |= const.Health.Failed
                output['states'].append(upper + " non-recoverable threshold")
                output['state_ids'].append(self.assert_trap_value(6))
        return SensorReading(output, self.unit_suffix)
|
||||
|
||||
    def _set_tmp_formula(self, ipmicmd, value):
        """Fetch per-reading conversion factors (Get Sensor Reading Factors).

        Used for non-linear sensors whose factors vary with the raw value.
        NOTE(review): raw_command is awaited elsewhere in this module; here
        it is called without await — confirm this path still works in the
        async port when a non-linear sensor is encountered.
        """
        rsp = ipmicmd.raw_command(netfn=4, command=0x23,
                                  data=(self.sensor_number, value))
        # skip next reading field, not used in on-demand situation
        self.decode_formula(rsp['data'][1:])
|
||||
|
||||
    def decode_value(self, ipmicmd, value):
        """Convert a raw sensor value to its meaningful number.

        Applies the spec conversion y = L[(M*x + B) * 10^K], fetching the
        factors on demand for non-linear (linearization > 11) sensors.
        """
        linearization = self.linearization
        if linearization > 11:  # direct calling code to get factors
            # for now, we will get the factors on demand.
            # the facility is engineered such that at construction
            # time the entire BMC table should be fetchable in a reasonable
            # fashion.  However for now opt for retrieving rows as needed
            # rather than tracking all that information for a relatively
            # rare behavior
            self._set_tmp_formula(ipmicmd, value)
            linearization = 0
        # time to compute the pre-linearization value.
        decoded = float((value * self.m + self.b)
                        * (10 ** self.resultexponent))
        if linearization == 0:
            return decoded
        elif linearization == 1:
            return math.log(decoded)
        elif linearization == 2:
            return math.log(decoded, 10)
        elif linearization == 3:
            return math.log(decoded, 2)
        elif linearization == 4:
            return math.exp(decoded)
        elif linearization == 5:
            return 10 ** decoded
        elif linearization == 6:
            return 2 ** decoded
        elif linearization == 7:
            return 1 / decoded
        elif linearization == 8:
            return decoded ** 2
        elif linearization == 9:
            return decoded ** 3
        elif linearization == 10:
            return math.sqrt(decoded)
        elif linearization == 11:
            return decoded ** (1.0 / 3)
        else:
            raise NotImplementedError
|
||||
|
||||
def decode_formula(self, entry):
|
||||
self.m = twos_complement(entry[0] + ((entry[1] & 0b11000000) << 2), 10)
|
||||
self.tolerance = entry[1] & 0b111111
|
||||
self.b = twos_complement(entry[2] + ((entry[3] & 0b11000000) << 2), 10)
|
||||
self.accuracy = (entry[3] & 0b111111) + (entry[4] & 0b11110000) << 2
|
||||
self.accuracyexp = (entry[4] & 0b1100) >> 2
|
||||
self.direction = entry[4] & 0b11
|
||||
# 0 = n/a, 1 = input, 2 = output
|
||||
self.resultexponent = twos_complement((entry[5] & 0b11110000) >> 4, 4)
|
||||
bexponent = twos_complement(entry[5] & 0b1111, 4)
|
||||
# might as well do the math to 'b' now rather than wait for later
|
||||
self.b = self.b * (10**bexponent)
|
||||
|
||||
def tlv_decode(self, tlv, data):
|
||||
# Per IPMI 'type/length byte format
|
||||
ipmitype = (tlv & 0b11000000) >> 6
|
||||
if not len(data):
|
||||
return ""
|
||||
if ipmitype == 0: # Unicode per 43.15 in ipmi 2.0 spec
|
||||
# the spec is not specific about encoding, assuming utf8
|
||||
return struct.pack("%dB" % len(data), *data).decode("utf-8")
|
||||
elif ipmitype == 1: # BCD '+'
|
||||
tmpl = "%02X" * len(data)
|
||||
tstr = tmpl % tuple(data)
|
||||
tstr = tstr.replace("A", " ").replace("B", "-").replace("C", ".")
|
||||
return tstr.replace("D", ":").replace("E", ",").replace("F", "_")
|
||||
elif ipmitype == 2: # 6 bit ascii, start at 0x20
|
||||
# the ordering is very peculiar and is best understood from
|
||||
# IPMI SPEC "6-bit packed ascii example
|
||||
tstr = ""
|
||||
while len(data) >= 3: # the packing only works with 3 byte chunks
|
||||
tstr += chr((data[0] & 0b111111) + 0x20)
|
||||
tstr += chr(((data[1] & 0b1111) << 2) + (data[0] >> 6) + 0x20)
|
||||
tstr += chr(((data[2] & 0b11) << 4) + (data[1] >> 4) + 0x20)
|
||||
tstr += chr((data[2] >> 2) + 0x20)
|
||||
if not isinstance(tstr, str):
|
||||
tstr = tstr.decode('utf-8')
|
||||
return tstr
|
||||
elif ipmitype == 3: # ACSII+LATIN1
|
||||
ret = struct.pack("%dB" % len(data), *data)
|
||||
if not isinstance(ret, str):
|
||||
ret = ret.decode('utf-8')
|
||||
return ret
|
||||
|
||||
|
||||
class SDR(object):
|
||||
"""Examine the state of sensors managed by a BMC
|
||||
|
||||
Presents the data from sensor read commands as directed by the SDR in a
|
||||
reasonable format. This module is used by the command module, and is not
|
||||
intended for consumption by external code directly
|
||||
|
||||
:param ipmicmd: A Command class object
|
||||
"""
|
||||
def __init__(self, ipmicmd, cachedir=None):
|
||||
self.ipmicmd = weakref.proxy(ipmicmd)
|
||||
self.sensors = {}
|
||||
self.fru = {}
|
||||
self.cachedir = cachedir
|
||||
|
||||
    async def initialize(self):
        """Asynchronously populate the SDR; call once after construction."""
        await self.read_info()
|
||||
|
||||
    async def read_info(self):
        """Read BMC device id information, then fetch the SDR repository."""
        # first, we want to know the device id
        rsp = await self.ipmicmd.raw_command(netfn=6, command=1)
        rsp['data'] = bytearray(rsp['data'])
        self.device_id = rsp['data'][0]
        self.device_rev = rsp['data'][1] & 0b111
        # Going to ignore device available until get sdr command
        # since that provides usefully distinct state and this does not
        self.fw_major = rsp['data'][2] & 0b1111111
        self.fw_minor = "%02X" % rsp['data'][3]  # BCD encoding, oddly enough
        self.ipmiversion = rsp['data'][4]  # 51h = 1.5, 02h = 2.0
        self.mfg_id = (rsp['data'][8] << 16) + (rsp['data'][7] << 8) + \
            rsp['data'][6]
        self.prod_id = (rsp['data'][10] << 8) + rsp['data'][9]
        if len(rsp['data']) > 11:
            self.aux_fw = self.decode_aux(rsp['data'][11:15])
        if rsp['data'][1] & 0b10000000 and rsp['data'][5] & 0b10 == 0:
            # The device has device sdrs, also does not support SDR repository
            # device, so we are meant to use an alternative mechanism to get
            # SDR data
            if rsp['data'][5] & 1:
                # The device has sensor device support, so in theory we should
                # be able to proceed.  However at the moment, we haven't
                # done so
                raise NotImplementedError
            # We have Device SDR, without SDR Repository device, but
            # also without sensor device support, no idea how to
            # continue
            return
        await self.get_sdr()
|
||||
|
||||
async def get_sdr_reservation(self):
|
||||
rsp = await self.ipmicmd.raw_command(netfn=0x0a, command=0x22)
|
||||
if rsp['code'] != 0:
|
||||
raise exc.IpmiException(rsp['error'])
|
||||
return rsp['data'][0] + (rsp['data'][1] << 8)
|
||||
|
||||
    async def get_sdr(self):
        """Fetch and parse the full SDR repository, using caches when valid.

        Populates self.sensors and self.fru.  Parsed data is shared
        process-wide via shared_sdrs and, when self.cachedir is set,
        persisted on disk — both keyed by firmware/product identity and the
        repository modification timestamps.
        """
        repinfo = await self.ipmicmd.raw_command(netfn=0x0a, command=0x20)
        repinfo['data'] = bytearray(repinfo['data'])
        if (repinfo['data'][0] != 0x51):
            # we only understand SDR version 51h, the only version defined
            # at time of this writing
            raise NotImplementedError
        # NOTE(jbjohnso): we actually don't need to care about 'numrecords'
        # since FFFF marks the end explicitly
        # numrecords = (rsp['data'][2] << 8) + rsp['data'][1]
        # NOTE(jbjohnso): don't care about 'free space' at the moment
        # NOTE(jbjohnso): most recent timestamp data for add and erase could
        # be handy to detect cache staleness, but for now will assume
        # invariant over life of session
        # NOTE(jbjohnso): not looking to support the various options in op
        # support, ignore those for now, reservation if some BMCs can't read
        # full SDR in one slurp
        modtime = struct.unpack('!Q', bytes(repinfo['data'][5:13]))[0]
        recid = 0
        rsvid = 0  # partial 'get sdr' will require this
        offset = 0
        size = 0xff
        chunksize = 128
        try:
            # process-wide in-memory cache hit?
            csdrs = shared_sdrs[
                (self.fw_major, self.fw_minor, self.mfg_id, self.prod_id,
                 self.device_id, modtime)]
            self.sensors = csdrs['sensors']
            self.fru = csdrs['fru']
            return
        except KeyError:
            pass
        cachefilename = None
        self.broken_sensor_ids = {}
        if self.cachedir:
            cachefilename = 'sdrcache-2.{0}.{1}.{2}.{3}.{4}.{5}'.format(
                self.mfg_id, self.prod_id, self.device_id, self.fw_major,
                self.fw_minor, modtime)
            cachefilename = os.path.join(self.cachedir, cachefilename)
        if cachefilename and os.path.isfile(cachefilename):
            # on-disk cache: a stream of length-prefixed raw SDR records
            with open(cachefilename, 'rb') as cfile:
                csdrlen = cfile.read(2)
                while csdrlen:
                    csdrlen = struct.unpack('!H', csdrlen)[0]
                    await self.add_sdr(cfile.read(csdrlen))
                    csdrlen = cfile.read(2)
            # drop sensors whose ids appeared more than once (ambiguous)
            for sid in self.broken_sensor_ids:
                try:
                    del self.sensors[sid]
                except KeyError:
                    pass
            shared_sdrs[
                (self.fw_major, self.fw_minor, self.mfg_id, self.prod_id,
                 self.device_id, modtime)] = {
                'sensors': self.sensors,
                'fru': self.fru,
            }
            return
        sdrraw = [] if cachefilename else None
        while recid != 0xffff:  # per 33.12 Get SDR command, 0xffff marks end
            newrecid = 0
            currlen = 0
            sdrdata = bytearray()
            while True:  # loop until SDR fetched wholly
                if size != 0xff and rsvid == 0:
                    rsvid = await self.get_sdr_reservation()
                rqdata = [rsvid & 0xff, rsvid >> 8,
                          recid & 0xff, recid >> 8,
                          offset, size]
                sdrrec = await self.ipmicmd.raw_command(netfn=0x0a,
                                                        command=0x23,
                                                        data=rqdata)
                if sdrrec['code'] == 0xca:
                    # response would not fit; shrink the requested size
                    if size == 0xff:  # get just 5 to get header to know length
                        size = 5
                    elif size > 5:
                        size //= 2
                        # push things over such that it's less
                        # likely to be just 1 short of a read
                        # and incur a whole new request
                        size += 2
                        chunksize = size
                    continue
                if sdrrec['code'] == 0xc5:  # need a new reservation id
                    rsvid = 0
                    continue
                if sdrrec['code'] != 0:
                    raise exc.IpmiException(sdrrec['error'])
                if newrecid == 0:
                    newrecid = (sdrrec['data'][1] << 8) + sdrrec['data'][0]
                if currlen == 0:
                    currlen = sdrrec['data'][6] + 5  # compensate for header
                sdrdata.extend(sdrrec['data'][2:])
                # determine next offset to use based on current offset and the
                # size used last time.
                offset += size
                if offset >= currlen:
                    break
                if size == 5 and offset == 5:
                    # bump up size after header retrieval
                    size = chunksize
                if (offset + size) > currlen:
                    size = currlen - offset
            await self.add_sdr(sdrdata)
            if sdrraw is not None:
                sdrraw.append(bytes(sdrdata))
            offset = 0
            if size != 0xff:
                size = 5
            if newrecid == recid:
                raise exc.BmcErrorException("Incorrect SDR record id from BMC")
            recid = newrecid
        for sid in self.broken_sensor_ids:
            try:
                del self.sensors[sid]
            except KeyError:
                pass
        shared_sdrs[(self.fw_major, self.fw_minor, self.mfg_id, self.prod_id,
                     self.device_id, modtime)] = {
            'sensors': self.sensors,
            'fru': self.fru,
        }
        if cachefilename:
            # write atomically: random-suffix temp file, then rename over
            suffix = ''.join(
                random.choice(string.ascii_lowercase) for _ in range(12))
            with open(cachefilename + '.' + suffix, 'wb') as cfile:
                for csdr in sdrraw:
                    cfile.write(struct.pack('!H', len(csdr)))
                    cfile.write(csdr)
            os.rename(cachefilename + '.' + suffix, cachefilename)
|
||||
|
||||
def get_sensor_numbers(self):
|
||||
for number in self.sensors:
|
||||
if self.sensors[number].readable:
|
||||
yield number
|
||||
|
||||
    async def make_sdr_entry(self, sdrbytes):
        """Construct an SDREntry from raw bytes using this BMC's constants."""
        return SDREntry(sdrbytes, await self.ipmicmd.get_event_constants(),
                        False, self.mfg_id, self.prod_id)
|
||||
|
||||
async def add_sdr(self, sdrbytes):
|
||||
if not isinstance(sdrbytes[0], int):
|
||||
sdrbytes = bytearray(sdrbytes)
|
||||
newent = await self.make_sdr_entry(sdrbytes)
|
||||
if newent.sdrtype == TYPE_SENSOR:
|
||||
id = '{0}.{1}.{2}'.format(
|
||||
newent.sensor_owner, newent.sensor_number, newent.sensor_lun)
|
||||
if id in self.sensors:
|
||||
self.broken_sensor_ids[id] = True
|
||||
return
|
||||
self.sensors[id] = newent
|
||||
elif newent.sdrtype == TYPE_FRU:
|
||||
id = newent.fru_number
|
||||
if id in self.fru:
|
||||
self.broken_sensor_ids[id] = True
|
||||
return
|
||||
self.fru[id] = newent
|
||||
|
||||
def decode_aux(self, auxdata):
|
||||
# This is where manufacturers can add their own
|
||||
# decode information
|
||||
return "".join(hex(x) for x in auxdata)
|
||||
22
confluent_server/aiohmi/media.py
Normal file
22
confluent_server/aiohmi/media.py
Normal file
@@ -0,0 +1,22 @@
|
||||
# Copyright 2013 IBM Corporation
|
||||
# Copyright 2015-2017 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# These are the objects returned by list_media
|
||||
|
||||
|
||||
class Media(object):
    """A removable-media item as returned by list_media.

    :param name: display name of the media
    :param url: source URL backing the media, when applicable
    """

    def __init__(self, name, url=None):
        self.name = name
        self.url = url
|
||||
0
confluent_server/aiohmi/redfish/__init__.py
Normal file
0
confluent_server/aiohmi/redfish/__init__.py
Normal file
1588
confluent_server/aiohmi/redfish/command.py
Normal file
1588
confluent_server/aiohmi/redfish/command.py
Normal file
File diff suppressed because it is too large
Load Diff
0
confluent_server/aiohmi/redfish/oem/__init__.py
Normal file
0
confluent_server/aiohmi/redfish/oem/__init__.py
Normal file
20
confluent_server/aiohmi/redfish/oem/ami/main.py
Normal file
20
confluent_server/aiohmi/redfish/oem/ami/main.py
Normal file
@@ -0,0 +1,20 @@
|
||||
# Copyright 2025 Lenovo Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import aiohmi.redfish.oem.ami.megarac as megarac
|
||||
|
||||
|
||||
async def get_handler(sysinfo, sysurl, webclient, cache, cmd, rootinfo={}):
    """Return the AMI MegaRAC OEM handler for the target system.

    NOTE(review): rootinfo has a mutable default; it is currently unused
    here, so this is harmless, but worth cleaning up if it ever gets used.
    """
    return await megarac.OEMHandler.create(sysinfo, sysurl, webclient, cache,
                                           gpool=cmd._gpool)
|
||||
31
confluent_server/aiohmi/redfish/oem/ami/megarac.py
Normal file
31
confluent_server/aiohmi/redfish/oem/ami/megarac.py
Normal file
@@ -0,0 +1,31 @@
|
||||
# Copyright 2025 Lenovo Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import aiohmi.redfish.oem.generic as generic
|
||||
|
||||
|
||||
class OEMHandler(generic.OEMHandler):
    """AMI MegaRAC-specific OEM handler."""

    @classmethod
    async def create(cls, sysinfo, sysurl, webclient, cache, gpool=None):
        """Build a handler, discovering the system URL when not provided.

        MegaRAC exposes the single system as .../Self or .../System_0; when
        sysurl is None the Systems collection is probed for either suffix.
        """
        self = await super().create(sysinfo, sysurl, webclient, cache,
                                    gpool)
        if sysurl is None:
            systems, status = await webclient.grab_json_response_with_status('/redfish/v1/Systems')
            if status == 200:
                for system in systems.get('Members', []):
                    if system.get('@odata.id', '').endswith('/Self') or system.get('@odata.id', '').endswith('/System_0'):
                        sysurl = system['@odata.id']
                        break
        # remember the (possibly discovered) system URL
        self._varsysurl = sysurl
        return self
|
||||
43
confluent_server/aiohmi/redfish/oem/dell/idrac.py
Normal file
43
confluent_server/aiohmi/redfish/oem/dell/idrac.py
Normal file
@@ -0,0 +1,43 @@
|
||||
# Copyright 2022 Lenovo Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import aiohmi.redfish.oem.generic as generic
|
||||
|
||||
|
||||
class OEMHandler(generic.OEMHandler):
    """Dell iDRAC-specific OEM handler."""

    def set_bootdev(self, bootdev, persist=False, uefiboot=None,
                    fishclient=None):
        """Set the next boot device, redirecting virtual media to iDRAC.

        gleaned from web console, under configuration, system settings,
        hardware, first boot device.  iDrac presumes that the standard
        explicitly refers only to physical devices.  I think the intent
        is the exact opposite for 'removable' media, and thus redirect
        the 'physical' standard to the vFDD/VCD-DVD seen in the idrac
        web gui
        """
        if bootdev not in ('floppy', 'cd'):
            return super(OEMHandler, self).set_bootdev(bootdev, persist,
                                                       uefiboot, fishclient)
        payload = {'Attributes': {}}
        if persist:
            payload['Attributes']['ServerBoot.1.BootOnce'] = 'Disabled'
        else:
            payload['Attributes']['ServerBoot.1.BootOnce'] = 'Enabled'
        if bootdev == 'floppy':
            payload['Attributes']['ServerBoot.1.FirstBootDevice'] = 'vFDD'
        elif bootdev == 'cd':
            payload['Attributes']['ServerBoot.1.FirstBootDevice'] = 'VCD-DVD'
        # NOTE(review): _do_web_request is not awaited although much of this
        # codebase is async — confirm whether this method should be a
        # coroutine in the aiohmi port.
        fishclient._do_web_request(
            '/redfish/v1/Managers/iDRAC.Embedded.1/Attributes',
            payload, method='PATCH')
        return {'bootdev': bootdev}
|
||||
20
confluent_server/aiohmi/redfish/oem/dell/main.py
Normal file
20
confluent_server/aiohmi/redfish/oem/dell/main.py
Normal file
@@ -0,0 +1,20 @@
|
||||
# Copyright 2022 Lenovo Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from aiohmi.redfish.oem.dell import idrac
|
||||
|
||||
|
||||
async def get_handler(sysinfo, sysurl, webclient, cache, cmd, rootinfo={}):
    """Return the Dell iDRAC OEM handler for the target system.

    NOTE(review): rootinfo has a mutable default; it is unused here, so
    harmless, but worth cleaning up if it ever gets used.
    """
    return await idrac.OEMHandler.create(sysinfo, sysurl, webclient, cache,
                                         gpool=cmd._gpool)
|
||||
1663
confluent_server/aiohmi/redfish/oem/generic.py
Normal file
1663
confluent_server/aiohmi/redfish/oem/generic.py
Normal file
File diff suppressed because it is too large
Load Diff
66
confluent_server/aiohmi/redfish/oem/lenovo/main.py
Normal file
66
confluent_server/aiohmi/redfish/oem/lenovo/main.py
Normal file
@@ -0,0 +1,66 @@
|
||||
# Copyright 2019 Lenovo Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import aiohmi.redfish.oem.generic as generic
|
||||
from aiohmi.redfish.oem.lenovo import tsma
|
||||
from aiohmi.redfish.oem.lenovo import xcc
|
||||
from aiohmi.redfish.oem.lenovo import xcc3
|
||||
from aiohmi.redfish.oem.lenovo import smm3
|
||||
|
||||
|
||||
async def get_handler(sysinfo, sysurl, webclient, cache, cmd, rootinfo=None):
    """Pick the Lenovo OEM handler matching the target's personality.

    Dispatch order: AMI-based TSM (detected via IPMI bmcinfo or a
    redfish inventory fingerprint), XCC3/XCC (by manager model string
    or OEM hints), SMM3 (by DeviceDescription.json), otherwise the
    generic handler.  ``rootinfo`` is accepted for interface parity and
    unused (default changed from mutable ``{}`` to ``None``).
    """
    if not sysinfo:
        # We are before establishing there is one system and one
        # manager; probe for the conventional single system ending in /1
        systems, status = await webclient.grab_json_response_with_status('/redfish/v1/Systems')
        if status == 200:
            for system in systems.get('Members', []):
                if system.get('@odata.id', '').endswith('/1'):
                    sysurl = system['@odata.id']
                    sysinfo, status = await webclient.grab_json_response_with_status(sysurl)
                    break
        if not sysinfo:
            # Probe failed or returned nothing; use an empty map so the
            # .get() probes below do not raise on None
            sysinfo = {}
    leninf = sysinfo.get('Oem', {}).get('Lenovo', {})
    mgrinfo = {}
    if leninf:
        mgrinfo, status = await webclient.grab_json_response_with_status('/redfish/v1/Managers/1')
        if status != 200:
            mgrinfo = {}
    if not leninf:
        bmcinfo = await cmd.bmcinfo()
        if 'Ami' in bmcinfo.get('Oem', {}):
            # NOTE(review): other paths use tsma.TsmHandler.create();
            # confirm direct construction initializes session state
            return tsma.TsmHandler(sysinfo, sysurl, webclient, cache)
    elif 'xclarity controller' in mgrinfo.get('Model', '').lower():
        if mgrinfo['Model'].endswith('3'):
            return await xcc3.OEMHandler.create(sysinfo, sysurl, webclient, cache,
                                                gpool=cmd._gpool)
        else:
            return await xcc.OEMHandler.create(sysinfo, sysurl, webclient, cache,
                                               gpool=cmd._gpool)
    elif 'FrontPanelUSB' in leninf or 'USBManagementPortAssignment' in leninf or sysinfo.get('SKU', '').startswith('7X58'):
        return await xcc.OEMHandler.create(sysinfo, sysurl, webclient, cache,
                                           gpool=cmd._gpool)
    else:
        leninv = sysinfo.get('Links', {}).get('OEM', {}).get(
            'Lenovo', {}).get('Inventory', {})
        if 'hdd' in leninv and 'hostMAC' in leninv and 'backPlane' in leninv:
            return await tsma.TsmHandler.create(sysinfo, sysurl, webclient, cache,
                                                gpool=cmd._gpool)
    try:
        # SMM3 identifies itself via DeviceDescription.json rather than
        # standard redfish properties; best-effort, fall through on error
        devdesc = await webclient.grab_json_response_with_status('/DeviceDescription.json')
        if devdesc[1] == 200:
            if devdesc[0]['type'].lower() in ('lenovo-smm3', 'smm3'):
                return smm3.OEMHandler(sysinfo, sysurl, webclient, cache,
                                       gpool=cmd._gpool)
    except Exception:
        pass
    return generic.OEMHandler(sysinfo, sysurl, webclient, cache,
                              gpool=cmd._gpool)
|
||||
277
confluent_server/aiohmi/redfish/oem/lenovo/smm3.py
Normal file
277
confluent_server/aiohmi/redfish/oem/lenovo/smm3.py
Normal file
@@ -0,0 +1,277 @@
|
||||
# Copyright 2025 Lenovo Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import asyncio
|
||||
import copy
|
||||
import os
|
||||
import aiohmi.redfish.oem.generic as generic
|
||||
import aiohmi.constants as pygconst
|
||||
import aiohmi.util.webclient as webclient
|
||||
import aiohmi.exceptions as exc
|
||||
import time
|
||||
import socket
|
||||
|
||||
# Map redfish Status.Health strings (lowercased) to aiohmi health
# constants; values not present here are treated as critical by
# OEMHandler.get_health below.
healthlookup = {
    'ok': pygconst.Health.Ok,
    'critical': pygconst.Health.Critical
}
|
||||
|
||||
def _baytonumber(bay):
|
||||
try:
|
||||
return int(bay)
|
||||
except ValueError:
|
||||
if len(bay) == 2:
|
||||
# Treat a hexadecimal system as a leading decimal digit and letter compile
|
||||
# 1a == slot 1, 1b == slot 2, 2a == slot 1, etc..
|
||||
try:
|
||||
tmp = int(bay, 16)
|
||||
return (2 * (tmp >> 4) - 1) + ((tmp & 15) % 10)
|
||||
except ValueError:
|
||||
return None
|
||||
return None
|
||||
|
||||
|
||||
def _baytolabel(bay):
|
||||
try:
|
||||
baynum = int(bay)
|
||||
if baynum < 1:
|
||||
raise exc.UnsupportedFunctionality(
|
||||
'Reseat not supported for whole chassis')
|
||||
# need to convert to 1a, 1b, etc...
|
||||
vertidx = ((baynum - 1) // 2 + 1) << 4
|
||||
horizidx = (baynum - 1) % 2 + 10
|
||||
bayid = vertidx | horizidx
|
||||
return '{:02x}'.format(bayid)
|
||||
except ValueError:
|
||||
return bay
|
||||
return None
|
||||
|
||||
class OEMHandler(generic.OEMHandler):
    """OEM extensions for the Lenovo SMMv3 enclosure manager.

    Covers chassis health and identify LED, chassis- and node-level
    power capping, the SMMv3 push firmware update flow, diagnostic
    (FFDC) collection, node reseat, and presentation of the enclosure
    chassis as a system resource.
    """

    async def get_health(self, fishclient, verbose=True):
        """Report chassis health as an aiohmi health constant.

        Any health string not in healthlookup is conservatively
        reported as critical.  verbose/fishclient are accepted for
        interface parity and unused.
        """
        rsp = await self._do_web_request('/redfish/v1/Chassis/chassis1')
        health = rsp.get('Status', {}).get('Health', 'Unknown').lower()
        health = healthlookup.get(health, pygconst.Health.Critical)
        return {'health': health}

    async def set_identify(self, on=True, blink=False):
        """Set the chassis identify LED on, off, or blinking.

        Blink takes precedence over steady-on (previously a truthy
        ``on`` masked a requested blink).
        """
        if blink:
            state = 'Blinking'
        elif on:
            state = 'On'
        else:
            state = 'Off'
        await self._do_web_request('/redfish/v1/Chassis/chassis1', {
            'Oem': {'Lenovo': {'LED': {'IdentifyLED': {
                'State': state
            }}}
        }}, method='PATCH')

    async def get_system_configuration(self, hideadvanced=True, fishclient=None):
        # The SMM has no UEFI/system settings of its own
        return {}

    async def set_bmc_configuration(self, changeset):
        """Apply chassis and per-node power cap settings.

        Recognized keys mirror those emitted by get_bmc_configuration():
        chassis_user_cap, chassis_user_cap_active, node_<id>_user_cap
        and node_<id>_user_cap_active.
        """
        chassisparms = {}
        nodeparms = {}
        for setting, value in changeset.items():
            if setting == 'chassis_user_cap':
                chassisparms.setdefault('Oem', {}).setdefault('Lenovo', {}).setdefault('PowerCap', {})['UserPowerCap'] = int(value)
            elif setting == 'chassis_user_cap_active':
                capstate = value.lower().startswith('enable')
                chassisparms.setdefault('Oem', {}).setdefault('Lenovo', {}).setdefault('PowerCap', {})['UserPowerCapEnabled'] = capstate
            elif setting.startswith('node_') and setting.endswith('_user_cap'):
                # Strip 'node_' prefix and '_user_cap' suffix (9 chars);
                # the previous slice (-13) truncated the node id
                nodeid = setting[5:-9]
                nodeparms.setdefault(nodeid, {}).setdefault('PowerCap', {})['UserPowerCap'] = int(value)
            elif setting.startswith('node_') and setting.endswith('_user_cap_active'):
                # Strip 'node_' prefix and '_user_cap_active' suffix
                # (16 chars); previous slice (-20) truncated the node id
                nodeid = setting[5:-16]
                capstate = value.lower().startswith('enable')
                nodeparms.setdefault(nodeid, {}).setdefault('PowerCap', {})['UserPowerCapEnabled'] = capstate
        if chassisparms:
            await self._do_web_request('/redfish/v1/Chassis/chassis1', chassisparms, method='PATCH')
        for nodeid, parms in nodeparms.items():
            url = '/redfish/v1/Chassis/chassis1/Oem/Lenovo/Nodes/{}'.format(nodeid)
            await self._do_web_request(url, parms, method='PATCH')

    async def _get_cpu_inventory(self, withids=False):
        # Empty generator: no CPU inventory items for this OEM handler.
        if False:
            yield None

    async def _get_mem_inventory(self, withids=False):
        # Empty generator: no memory inventory items for this OEM handler.
        if False:
            yield None

    async def _get_adp_inventory(self, withids=False, urls=None):
        # Empty generator: no adapter inventory items for this OEM handler.
        if False:
            yield None

    async def _get_disk_inventory(self, withids=False, urls=None):
        # Empty generator: no disk inventory items for this OEM handler.
        if False:
            yield None

    async def get_bmc_configuration(self):
        """Return chassis and per-node power cap settings.

        Each entry is a {'value': ..., 'help': ...} map; the help text
        embeds the firmware-reported min/max cap range.
        """
        settings = {}
        rsp = await self._do_web_request('/redfish/v1/Chassis/chassis1')
        chassiscap = rsp.get('Oem', {}).get('Lenovo', {}).get('PowerCap', {})
        usercap = chassiscap.get('UserPowerCap', None)
        capstate = chassiscap.get('UserPowerCapEnabled', False)
        mincap = chassiscap.get('MinimumPowerCap', None)
        maxcap = chassiscap.get('MaximumPowerCap', None)
        settings['chassis_user_cap'] = {
            'value': usercap,
            'help': 'Specify a maximum wattage to consume, this specific '
                    'system implements a range from {0} to {1}.'.format(
                        mincap, maxcap)
        }
        settings['chassis_user_cap_active'] = {
            'value': 'Enable' if capstate else 'Disable',
            'help': 'Specify whether the user capping setting should be '
                    'used or not at the chassis level.',
        }
        rsp = await self._get_expanded_data('/redfish/v1/Chassis/chassis1/Oem/Lenovo/Nodes')
        for noderesp in rsp.get('Members', []):
            nodeid = noderesp.get('Id', 'unknown')
            nodecap = noderesp.get('PowerCap', {})
            usercap = nodecap.get('UserPowerCap', None)
            capstate = nodecap.get('UserPowerCapEnabled', False)
            mincap = nodecap.get('MinimumPowerCap', None)
            maxcap = nodecap.get('MaximumPowerCap', None)
            settings['node_{}_user_cap'.format(nodeid)] = {
                'value': usercap,
                'help': 'Specify a maximum wattage to consume for node '
                        '{}, this specific node implements a range from '
                        '{} to {}.'.format(
                            nodeid, mincap, maxcap)
            }
            settings['node_{}_user_cap_active'.format(nodeid)] = {
                'value': 'Enable' if capstate else 'Disable',
                'help': 'Specify whether the user capping setting should be '
                        'used or not at the node {} level.'.format(nodeid),
            }
        return settings

    async def retrieve_firmware_upload_url(self):
        """Return (updateservice, upload url, False) for a push update.

        SMMv3 requires the non-multipart HttpPushUri upload style; the
        trailing False flags that multipart is not to be used.  Raises
        TemporaryError when another update holds the push target.
        """
        usd = await self._do_web_request('/redfish/v1/UpdateService', cache=False)
        if usd.get('HttpPushUriTargetsBusy', False):
            # fixed typo in the error message ('multtiple')
            raise exc.TemporaryError('Cannot run multiple updates to '
                                     'same target concurrently')
        try:
            upurl = usd['HttpPushUri']
        except KeyError:
            raise exc.UnsupportedFunctionality('Redfish firmware update only supported for implementations with push update support')
        if 'HttpPushUriTargetsBusy' in usd:
            # Claim the push target while our update is in flight
            await self._do_web_request(
                '/redfish/v1/UpdateService',
                {'HttpPushUriTargetsBusy': True}, method='PATCH')
        return usd, upurl, False

    async def continue_update(self, rsp, progress):
        """Start and monitor the update after the image is uploaded.

        SMMv3 does not return a task from the upload itself, so the
        staged image is looked up at the hardcoded firmware inventory
        entry and StartUpdate is invoked against its related target.
        """
        rsp = await self._do_web_request('/redfish/v1/UpdateService/FirmwareInventory/fwuimage')
        targ = None  # guard against an empty RelatedItem list
        for ri in rsp.get('RelatedItem', []):
            targ = ri.get('@odata.id', None)
        parms = {'Oem': {'Lenovo': {'SecureRollBack': False}}}
        rsp = await self._do_web_request('/redfish/v1/UpdateService', parms, method='PATCH')
        targspec = {'target': targ}
        rsp = await self._do_web_request('/redfish/v1/UpdateService/Actions/UpdateService.StartUpdate', targspec)
        monitorurl = rsp.get('@odata.id', None)
        return await self.monitor_update_progress(monitorurl, progress)

    async def get_diagnostic_data(self, savefile, progress=None, autosuffix=False):
        """Collect a Manager diagnostic dump and download it to savefile.

        Polls the collection task until complete, then resolves the
        data URI (falling back to the task payload's Location header)
        and streams the archive down.  Returns the path written.
        """
        tsk = await self._do_web_request(
            '/redfish/v1/Managers/bmc/LogServices/Dump/Actions/LogService.CollectDiagnosticData',
            {"DiagnosticDataType": "Manager"})
        taskrunning = True
        taskurl = tsk.get('@odata.id', None)
        pct = 0 if taskurl else 100
        durl = None
        # status is referenced after the loop; initialize in case the
        # poll loop never runs (no task URL returned)
        status = {}
        while pct < 100 and taskrunning:
            status = await self._do_web_request(taskurl)
            durl = status.get('AdditionalDataURI', '')
            pct = status.get('PercentComplete', 0)
            taskrunning = status.get('TaskState', 'Complete') == 'Running'
            if progress:
                progress({'phase': 'initializing', 'progress': float(pct)})
            if taskrunning:
                await asyncio.sleep(3)
        if not durl:
            # Fall back to the log entry referenced by the task
            # payload's Location header
            for hdr in status.get('Payload', {}).get('HttpHeaders', []):
                if hdr.startswith('Location: '):
                    enturl = hdr.replace('Location: ', '')
                    entryinfo = await self._do_web_request(enturl)
                    durl = entryinfo.get('AdditionalDataURI', None)
                    break
        if not durl:
            raise Exception("Failed getting service data url")
        if autosuffix and not savefile.endswith('.tar.xz'):
            savefile += time.strftime('-SMM3_%Y%m%d_%H%M%S.tar.xz')
        fd = webclient.make_downloader(self.webclient, durl, savefile)
        while not fd.completed():
            try:
                await fd.join(1)
            except asyncio.TimeoutError:
                pass
            if progress and await fd.get_progress():
                progress({'phase': 'download',
                          'progress': 100 * await fd.get_progress()})
        if fd.exc:
            raise fd.exc
        if progress:
            progress({'phase': 'complete'})
        return savefile

    def _extract_fwinfo(self, inf):
        # Pull the Lenovo build identifier out of a firmware inventory
        # (resource, url) pair, when present
        fwi, url = inf
        currinf = {}
        buildid = fwi.get('Oem', {}).get('Lenovo', {}).get('ExtendedVersion', None)
        if buildid:
            currinf['build'] = buildid
        return currinf

    async def _get_node_info(self):
        """Present the enclosure chassis as the system description.

        The chassis Model is re-published as SKU and the Model is fixed
        up to the enclosure product name.
        """
        nodeinfo = self._varsysinfo
        if not nodeinfo:
            overview = await self._do_web_request('/redfish/v1/')
            chassismembs = overview.get('Chassis', {}).get('@odata.id', None)
            if not chassismembs:
                return nodeinfo
            chassislist = await self._do_web_request(chassismembs)
            chassismembs = chassislist.get('Members', [])
            if len(chassismembs) == 1:
                chassisurl = chassismembs[0]['@odata.id']
                nodeinfo = await self._do_web_request(chassisurl)
        newnodeinfo = copy.deepcopy(nodeinfo)
        newnodeinfo['SKU'] = nodeinfo['Model']
        newnodeinfo['Model'] = 'N1380 Enclosure'
        return newnodeinfo

    async def reseat_bay(self, bay):
        """Reseat (virtual reseat) the node in the given bay.

        Numeric bays are translated to the BMC's hex label first.
        """
        bayid = _baytolabel(bay)
        url = '/redfish/v1/Chassis/chassis1/Oem/Lenovo/Nodes/{}/Actions/Node.Reseat'.format(bayid)
        await self._do_web_request(url, method='POST')

    async def get_event_log(self, clear=False, fishclient=None):
        # Include the chassis event log alongside the default logs
        return await super().get_event_log(clear, fishclient, extraurls=[{'@odata.id':'/redfish/v1/Chassis/chassis1/LogServices/EventLog'}])

    async def get_description(self, fishclient):
        # Fixed chassis geometry for this enclosure (slots grid 8x2)
        return {'height': 13, 'slot': 0, 'slots': [8, 2]}
|
||||
888
confluent_server/aiohmi/redfish/oem/lenovo/tsma.py
Normal file
888
confluent_server/aiohmi/redfish/oem/lenovo/tsma.py
Normal file
@@ -0,0 +1,888 @@
|
||||
# Copyright 2015-2017 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
import struct
|
||||
import time
|
||||
try:
|
||||
from urllib import urlencode
|
||||
except ImportError:
|
||||
from urllib.parse import urlencode
|
||||
|
||||
|
||||
import aiohmi.exceptions as exc
|
||||
import aiohmi.media as media
|
||||
import aiohmi.redfish.oem.generic as generic
|
||||
import aiohmi.util.webclient as webclient
|
||||
|
||||
hpm_by_filename = {}
|
||||
|
||||
|
||||
class HpmSection(object):
    """One component section parsed out of an HPM firmware image.

    Plain data holder populated by read_hpm(); __slots__ keeps the
    instances lightweight and typo-safe.
    """
    __slots__ = ['comp_id', 'comp_ver', 'comp_name', 'section_flash', 'data',
                 'hash_size', 'combo_image']
|
||||
|
||||
|
||||
def cstr_to_str(cstr):
    """Convert a NUL-terminated C-style byte string to a Python str.

    Bytes are truncated at the first NUL and decoded as UTF-8; str
    input passes through unchanged.
    """
    try:
        cstr = cstr[:cstr.index(b'\x00')]
    except Exception:
        # No terminator found (or input is already str) - keep as-is
        pass
    return cstr if isinstance(cstr, str) else cstr.decode('utf8')
|
||||
|
||||
|
||||
def read_hpm(filename, data):
    """Parse an HPM firmware image into a list of HpmSection objects.

    Reads from the file-like object ``data`` when provided (rewound to
    the start), otherwise opens ``filename``.  Walks the type-2 (OEM)
    sections following the header, capturing component id, version,
    name, flash offset, hash size and raw payload for each.  Sections 1
    and 2 are additionally combined (minus their trailing hashes) into
    a shared combo_image.  Raises on malformed section markers or a
    truncated file.
    """
    hpminfo = []
    if data:
        hpmfile = data
        hpmfile.seek(0)
    else:
        hpmfile = open(filename, 'rb')
    try:
        # Skip the fixed header plus a variable-length field whose
        # big-endian length lives at offset 0x20
        hpmfile.seek(0x20)
        skip = struct.unpack('>H', hpmfile.read(2))[0]
        hpmfile.seek(skip + 1, 1)
        sectype, compid = struct.unpack('BB', hpmfile.read(2))
        while sectype == 2:
            currsec = HpmSection()
            currsec.comp_id = compid
            hpmfile.seek(1, 1)
            major, minor, pat = struct.unpack('<BBI', hpmfile.read(6))
            currsec.comp_ver = '{0}.{1}.{2}'.format(major, minor, pat)
            currsec.comp_name = hpmfile.read(21).rstrip(b'\x00')
            # Section length includes 16 bytes of metadata read below
            currlen = struct.unpack('<I', hpmfile.read(4))[0] - 16
            oemstr = hpmfile.read(4)
            if oemstr != b'OEM\x00':
                raise Exception(
                    'Unrecognized HPM field near {0}'.format(hpmfile.tell()))
            currsec.section_flash = struct.unpack('<I', hpmfile.read(4))[0]
            hashpresent, hdrsize, blocks = struct.unpack('BBB',
                                                         hpmfile.read(3))
            if hashpresent != 1:
                hashpresent = 0
            # Zero when no hash; otherwise header plus 256 bytes/block
            currsec.hash_size = hashpresent * (256 * blocks + hdrsize)
            hpmfile.seek(5, 1)
            currsec.data = hpmfile.read(currlen)
            hpminfo.append(currsec)
            sectype, compid = struct.unpack('BB', hpmfile.read(2))
        # Concatenate sections 1 and 2 (hashes stripped) into the
        # combined update image shared by both sections
        upimg = (hpminfo[1].data[:-hpminfo[1].hash_size]
                 + hpminfo[2].data[:-hpminfo[2].hash_size])
        hpminfo[2].combo_image = upimg
        hpminfo[1].combo_image = upimg
        # Sanity check: parsing should have consumed the file to within
        # 512 bytes of the end
        currpos = hpmfile.tell()
        hpmfile.seek(0, 2)
        endpos = hpmfile.tell()
        if currpos < (endpos - 512):
            raise Exception("Unexpected end of HPM file")
    finally:
        # Only close handles this function opened itself
        if not data:
            hpmfile.close()
    return hpminfo
|
||||
|
||||
|
||||
class TsmHandler(generic.OEMHandler):
|
||||
hostnic = 'usb0'
|
||||
|
||||
    @classmethod
    async def create(cls, sysinfo, sysurl, webclient, cache=None, fish=None,
                     gpool=None):
        """Async factory: build a TsmHandler bound to a TSM endpoint.

        Delegates base construction to the generic handler, then
        initializes the lazily-established web session state.  Passing
        a ``fish`` (redfish module) marks the handler as IPMI-backed
        and enables init_redfish() on demand.
        """
        self = await super(TsmHandler, cls).create(sysinfo, sysurl, webclient, cache, fish,
                                                   gpool)
        if cache is None:
            # NOTE(review): rebinds only the local; the base create call
            # above already received the original cache argument
            cache = {}
        self._wc = None  # cached authenticated WebConnection (see get_wc)
        self.username = None
        self.password = None
        self.csrftok = None  # CSRF token issued by /api/session
        self.isipmi = bool(fish)
        self.fish = fish
        self.fishclient = None  # created lazily by init_redfish()
        self.tsm = webclient.thehost
        self._certverify = webclient.verifycallback
        return self
|
||||
|
||||
    async def clear_bmc_configuration(self):
        """Restore the TSM to defaults, preserving SDR, syslog, extlog.

        The flag map selects which subsystems are reset (1) versus
        kept (0).  The response status is not checked (best effort).
        """
        wc = await self.get_wc()
        rsp, status = await wc.grab_json_response_with_status(
            '/api/maintenance/restore_defaults',
            {"id": 1,
             "sdr": 0,
             "fru": 1,
             "sel": 1,
             "ipmi": 1,
             "network": 1,
             "ntp": 1,
             "snmp": 1,
             "ssh": 1,
             "kvm": 1,
             "authentication": 1,
             "syslog": 0,
             "web": 1,
             "extlog": 0,
             "redfish": 1},
            method='PUT')
|
||||
|
||||
    async def get_bmc_configuration(self):
        """Return DNS and password-policy settings as a settings map.

        Each entry is {'value': ...}.  password_login_failures is 0
        when the lockout policy is disabled; password-policy entries
        are omitted entirely when their endpoints are unavailable.
        """
        wc = await self.get_wc()
        rsp, status = await wc.grab_json_response_with_status(
            '/api/settings/dns-info')
        if status != 200:
            raise Exception(repr(rsp))
        settings = {}
        settings['dns_domain'] = {
            'value': rsp['domain_name']
        }
        dnssrvs = []
        for idx in range(3):
            # '::' marks an unset server slot in the TSM API
            currsrv = rsp.get('dns_server{0}'.format(idx + 1), '::')
            if currsrv and currsrv != '::':
                dnssrvs.append(currsrv)
        settings['dns_servers'] = {'value': ','.join(dnssrvs)}
        rsp, status = await wc.grab_json_response_with_status(
            '/api/LockoutPolicystatus')
        if status == 200:
            isenabled = rsp.get('Status', 0) == 1
            if isenabled:
                settings['password_login_failures'] = {'value': rsp.get(
                    'Attemptstimes', 0)}
            else:
                settings['password_login_failures'] = {'value': 0}
        rsp, status = await wc.grab_json_response_with_status(
            '/api/GetPWComplex')
        if status == 200:
            settings['password_complexity'] = {'value': rsp.get(
                'pw_complex', 0)}
        return settings
|
||||
|
||||
async def set_bmc_configuration(self, changeset):
|
||||
dnschgs = {}
|
||||
wc = await self.get_wc()
|
||||
for key in changeset:
|
||||
if isinstance(changeset[key], str):
|
||||
changeset[key] = {'value': changeset[key]}
|
||||
currval = changeset[key].get('value', None)
|
||||
if 'dns_servers'.startswith(key.lower()):
|
||||
srvs = currval.split(',')
|
||||
for idx in range(3):
|
||||
if idx < len(srvs):
|
||||
dnschgs['dns_server{0}'.format(idx + 1)] = srvs[idx]
|
||||
else:
|
||||
dnschgs['dns_server{0}'.format(idx + 1)] = ''
|
||||
if 'dns_domain'.startswith(key.lower()):
|
||||
dnschgs['domain_name'] = currval
|
||||
if 'password_complexity'.startswith(key.lower()):
|
||||
self._set_pass_complexity(currval, wc)
|
||||
if 'password_login_failures'.startswith(key.lower()):
|
||||
await self._set_pass_lockout(currval, wc)
|
||||
if dnschgs:
|
||||
await self._set_dns_config(dnschgs, wc)
|
||||
|
||||
    async def _set_pass_complexity(self, currval, wc):
        """Enable/disable password complexity enforcement on the TSM."""
        rsp, status = await wc.grab_json_response_with_status(
            '/api/SetPWComplex', {'Enable': currval})
        if status != 200:
            raise Exception(repr(rsp))
|
||||
    async def _set_pass_lockout(self, currval, wc):
        """Configure login-failure lockout; 0 disables the policy.

        Existing lock/reset time windows are read back and preserved;
        only the enable flag and attempt count are changed.
        """
        rsp, status = await wc.grab_json_response_with_status(
            '/api/LockoutPolicystatus')
        if status != 200:
            raise Exception(repr(rsp))
        request = {
            'SameStatus': 0,
            'Lock_min': rsp.get('Locktime', 5),
            'Rest_min': rsp.get('Resettime', 1),
            'Attemptstimes': rsp.get('Attemptstimes', 3)
        }
        if currval == 0:
            request['Enable'] = 0
        else:
            request['Enable'] = 1
            request['Attemptstimes'] = currval
        rsp, status = await wc.grab_json_response_with_status(
            '/api/SetLockoutPolicy', request)
        if status != 200:
            raise Exception(repr(rsp))
|
||||
|
||||
    async def _set_dns_config(self, dnschgs, wc):
        """Merge dnschgs into current DNS settings and restart DNS."""
        rsp, status = await wc.grab_json_response_with_status(
            '/api/settings/dns-info')
        if status != 200:
            raise Exception(repr(rsp))
        rsp['domain_manual'] = 1
        for i in range(3):
            # Unset server slots come back as '::'; normalize to empty
            # before writing back (presumably required by the PUT)
            keyn = 'dns_server{0}'.format(i + 1)
            if rsp[keyn] == '::':
                rsp[keyn] = ''
        for chg in dnschgs:
            rsp[chg] = dnschgs[chg]
        rsp, status = await wc.grab_json_response_with_status(
            '/api/settings/dns-info', rsp, method='PUT')
        if status != 200:
            raise Exception(repr(rsp))
        rsp, status = await wc.grab_json_response_with_status(
            '/api/settings/dns/restart', {'dns_status': 1}, method='PUT')
        if status != 200:
            raise Exception(repr(rsp))
|
||||
|
||||
    async def clear_uefi_configuration(self):
        """Reset UEFI settings to defaults via the redfish client."""
        if not self.fishclient:
            await self.init_redfish()
        return await self.fishclient.clear_system_configuration()
|
||||
|
||||
    async def get_uefi_configuration(self, hideadvanced=True):
        """Fetch UEFI settings via the lazily-created redfish client."""
        if not self.fishclient:
            await self.init_redfish()
        return await self.fishclient.get_system_configuration(hideadvanced)
|
||||
|
||||
    async def set_uefi_configuration(self, changeset):
        """Apply UEFI settings via the lazily-created redfish client."""
        if not self.fishclient:
            await self.init_redfish()
        return await self.fishclient.set_system_configuration(changeset)
|
||||
|
||||
    async def get_diagnostic_data(self, savefile, progress=None, autosuffix=False):
        """Trigger a mini-FFDC collection and download it to savefile.

        Polls /api/mini_ffdc until status leaves 1 (in progress);
        status 2 means the package is ready.  Returns the path written.
        """
        wc = await self.get_wc()
        await wc.grab_json_response('/api/mini_ffdc', {'action': 'trigger'})
        status = 1
        percent = 0
        while status == 1:
            await asyncio.sleep(5)
            check = await wc.grab_json_response('/api/mini_ffdc',
                                                {'action': 'check'})
            status = check.get('status', -1)
            if progress:
                # No real progress data is available while waiting;
                # report an ever-increasing placeholder percentage
                progress({'phase': 'initializing', 'progress': float(percent)})
            percent += 1
        if status != 2:
            raise Exception(
                "Unknown error generating service data: " + repr(check))
        if autosuffix and not savefile.endswith('.tar'):
            savefile += '.tar'
        fd = webclient.make_downloader(wc, '/api/mini_ffdc/package', savefile)
        while not fd.completed():
            try:
                await fd.join(1)
            except asyncio.TimeoutError:
                pass
            if progress:
                currprog = await fd.get_progress()
                if currprog:
                    progress({'phase': 'download',
                              'progress': 100 * currprog})
        if fd.exc:
            raise fd.exc
        if progress:
            progress({'phase': 'complete'})
        return savefile
|
||||
|
||||
    async def init_redfish(self):
        """Create the redfish Command client used for UEFI settings."""
        self.fishclient = await self.fish.Command.create(
            self.tsm, self.username, self.password,
            verifycallback=self._certverify)
|
||||
|
||||
    async def get_ntp_enabled(self):
        """Return True when automatic (NTP) date/time is enabled."""
        wc = await self.get_wc()
        rsp, status = await wc.grab_json_response_with_status(
            '/api/settings/date-time')
        if status != 200:
            raise Exception(repr(rsp))
        return rsp.get('ntp_auto_date', 0) > 0
|
||||
|
||||
    async def set_ntp_enabled(self, enabled):
        """Enable or disable NTP, preserving other date/time settings."""
        wc = await self.get_wc()
        rsp, status = await wc.grab_json_response_with_status(
            '/api/settings/date-time')
        if status != 200:
            raise Exception(repr(rsp))
        # Read-modify-write the whole settings document
        rsp['ntp_auto_date'] = 1 if enabled else 0
        rsp, status = await wc.grab_json_response_with_status(
            '/api/settings/date-time', rsp, method='PUT')
        if status != 200:
            raise Exception(repr(rsp))
|
||||
|
||||
async def get_ntp_servers(self):
|
||||
wc = await self.get_wc()
|
||||
rsp, status = await wc.grab_json_response_with_status(
|
||||
'/api/settings/date-time')
|
||||
if status != 200:
|
||||
raise Exception(repr(rsp))
|
||||
srvs = []
|
||||
pntp = rsp.get('primary_ntp', None)
|
||||
if pntp:
|
||||
srvs.append(pntp)
|
||||
pntp = rsp.get('secondary_ntp', None)
|
||||
if pntp:
|
||||
srvs.append(pntp)
|
||||
return srvs
|
||||
|
||||
    async def set_ntp_server(self, server, index=0):
        """Set the primary (index 0) or secondary (index 1) NTP server.

        Also forces NTP on, since assigning a server implies use.
        """
        wc = await self.get_wc()
        rsp, status = await wc.grab_json_response_with_status(
            '/api/settings/date-time')
        if status != 200:
            raise Exception(repr(rsp))
        if index == 0:
            rsp['primary_ntp'] = server
        elif index == 1:
            rsp['secondary_ntp'] = server
        rsp['ntp_auto_date'] = 1
        rsp, status = await wc.grab_json_response_with_status(
            '/api/settings/date-time', rsp, method='PUT')
        if status != 200:
            raise Exception(repr(rsp))
|
||||
    async def get_firmware_inventory(self, components, raisebypass=True,
                                     ipmicmd=None):
        """Yield (name, info) firmware inventory tuples from the TSM.

        Covers UEFI, LXPM (plus driver bundles when an IPMI command
        object is supplied) and the TSM itself (primary then backup
        bank).  Unless raisebypass is False, BypassGenericBehavior is
        raised at the end so the generic redfish inventory is skipped.
        """
        wc = await self.get_wc()
        fwinf, status = await wc.grab_json_response_with_status(
            '/api/DeviceVersion')
        gotinfo = False
        if status < 200 or status >= 300:
            raise Exception('Error connecting to HTTP API')
        for biosinf in fwinf:
            # device 1 carries the UEFI entry
            if biosinf.get('device', None) != 1:
                continue
            if not biosinf.get('buildname', False):
                break
            biosres = {
                'build': biosinf['buildname']
            }
            if biosinf.get('main', False):
                # First digit is the major version, remainder the minor
                biosres['version'] = '{0}.{1}'.format(
                    biosinf['main'][0], biosinf['main'][1:])
            yield ('UEFI', biosres)
            gotinfo = True
            break
        for lxpminf in fwinf:
            # device 2 carries the LXPM entry
            if lxpminf.get('device', None) != 2:
                continue
            if not lxpminf.get('buildname', False):
                break
            lxpmres = {
                'build': lxpminf['buildname']
            }
            if lxpminf.get('main', False):
                subver = lxpminf.get('sub', 0)
                lxpmres['version'] = '{0}.{1:02x}'.format(
                    lxpminf['main'], subver)
            yield ('LXPM', lxpmres)
        if ipmicmd:
            # Driver bundle build ids are only reachable via the OEM
            # IPMI command 0x3c/0x40
            rsp = await ipmicmd.raw_command(0x3c, 0x40, data=(7, 2))
            buildid = cstr_to_str(bytes(rsp['data']))
            yield ('LXPM Windows Driver Bundle', {'build': buildid})
            rsp = await ipmicmd.raw_command(0x3c, 0x40, data=(7, 3))
            buildid = cstr_to_str(bytes(rsp['data']))
            yield ('LXPM Linux Driver Bundle', {'build': buildid})
        name = 'TSM'
        fwinf, status = await wc.grab_json_response_with_status('/api/get-sysfwinfo')
        if status != 200:
            raise Exception('Error {0} retrieving TSM version: {1}'.format(
                status, fwinf))
        for cinf in fwinf:
            # Skip placeholder/invalid entries
            if 'fw_ver' not in cinf:
                continue
            if cinf.get('buildname', None) == 'N/A':
                continue
            if '.' not in cinf['fw_ver']:
                continue
            bmcinf = {
                'version': cinf['fw_ver'],
                'build': cinf['buildname'],
                'date': cinf['builddate'],
            }
            yield (name, bmcinf)
            gotinfo = True
            # A second valid entry is reported as the backup bank
            name += ' Backup'
        if not gotinfo:
            raise Exception("Unable to retrieve firmware information")
        if raisebypass:
            raise exc.BypassGenericBehavior()
|
||||
    async def get_wc(self):
        """Return an authenticated web connection to the TSM.

        Reuses the cached session if it still answers
        /api/chassis-status; otherwise logs in via /api/session (JSON
        body first, form-encoded fallback when the JSON attempt is
        rejected with 403) and installs the CSRF token header.
        """
        self.fwid = None
        if self._wc:
            rsp, status = await self._wc.grab_json_response_with_status(
                '/api/chassis-status')
            if status == 200:
                return self._wc
        authdata = {
            'username': self.username,
            'password': self.password,
        }
        wc = webclient.WebConnection(self.tsm, 443,
                                     verifycallback=self._certverify,
                                     timeout=180)
        wc.set_header('Content-Type', 'application/json')
        rsp, status = await wc.grab_json_response_with_status('/api/session',
                                                              authdata)
        if status == 403:
            # Fall back to form-encoded login when JSON is rejected
            wc.set_header('Content-Type', 'application/x-www-form-urlencoded')
            rsp, status = await wc.grab_json_response_with_status(
                '/api/session', urlencode(authdata))

        if status < 200 or status >= 300:
            raise Exception('Error establishing web session')
        self.csrftok = rsp['CSRFToken']
        wc.set_header('X-CSRFTOKEN', self.csrftok)
        self._wc = wc
        return wc
|
||||
|
||||
    async def update_firmware(self, filename, data=None, progress=None, bank=None):
        """Dispatch a firmware file to the matching TSM update flow.

        .hpm goes through the HPM update path; uefi*.rom through the
        system (UEFI) path; amd-sas*.bin through the backplane ('bp')
        path; lxpm*/fw_drv*.img through the LXPM path.  Anything else
        is rejected.
        """
        wc = await self.get_wc()
        wc.set_header('Content-Type', 'application/json')
        basefilename = os.path.basename(filename)
        if filename.endswith('.hpm'):
            return await self.update_hpm_firmware(filename, progress, wc, data)
        elif 'uefi' in basefilename and filename.endswith('.rom'):
            return await self.update_sys_firmware(filename, progress, wc, data=data)
        elif 'amd-sas' in basefilename and filename.endswith('.bin'):
            return await self.update_sys_firmware(filename, progress, wc, data=data,
                                                  type='bp')
        elif (('lxpm' in basefilename or 'fw_drv' in basefilename)
                and filename.endswith('.img')):
            return await self.update_lxpm_firmware(filename, progress, wc, data)
        else:
            raise Exception('Unsupported filename {0}'.format(filename))
|
||||
|
||||
async def update_lxpm_firmware(self, filename, progress, wc, data):
|
||||
hdrs = wc.stdheaders.copy()
|
||||
hdrs['Content-Length'] = 0
|
||||
rsp = await wc.grab_json_response_with_status(
|
||||
'/api/maintenance/LXPMUploadMode',
|
||||
method='PUT', headers=hdrs)
|
||||
# name fwimage filname filename application/x-raw-disk-image...
|
||||
fu = await webclient.make_uploader(
|
||||
wc, '/api/maintenance/LXPMUpload',
|
||||
filename, data, formname='fwimage')
|
||||
while not fu.completed():
|
||||
try:
|
||||
await fu.join(3)
|
||||
except asyncio.TimeoutError:
|
||||
pass
|
||||
if progress:
|
||||
progress({
|
||||
'phase': 'upload',
|
||||
'progress': 100 * await fu.get_progress()})
|
||||
if progress:
|
||||
progress({
|
||||
'phase': 'apply',
|
||||
'progress': 0.0}
|
||||
)
|
||||
await wc.grab_json_response('/api/maintenance/LXPMImageSplit', {'type': 3})
|
||||
completion = False
|
||||
while not completion:
|
||||
rsp = await wc.grab_json_response('/api/maintenance/LXPMstatus')
|
||||
if rsp.get('state') == 0 and rsp.get('progress') == 4:
|
||||
break
|
||||
await wc.grab_json_response_with_status(
|
||||
'/api/maintenance/Outofflash', method='PUT', headers=hdrs)
|
||||
return 'complete'
|
||||
|
||||
    async def update_sys_firmware(self, filename, progress, wc, type='uefi',
                                  data=None):
        """Flash system firmware (UEFI BIOS or backplane 'bp') via web API.

        Selects the endpoint family for the requested firmware type, enters
        the corresponding upload mode, uploads the image, starts the apply,
        then polls the status endpoint reporting progress until the BMC
        signals success or failure.

        :param filename: name of the firmware image file
        :param progress: optional callable receiving {'phase', 'progress'}
        :param wc: web client session connected to the BMC
        :param type: 'uefi' (default) or 'bp' for backplane firmware
        :param data: optional open file-like object with the image content
        :returns: 'complete' (bp path) or 'pending' (UEFI: applies on reboot)
        :raises Exception: if the system is powered on for a bp update, or
            the BMC reports an update failure
        """
        if type == 'bp':
            rsp = await wc.grab_json_response_with_status('/api/chassis-status')
            # Backplane flashing is only safe with the host powered off
            if rsp[0]['power_status'] == 1:
                raise Exception("Cannot update BP firmware while system is on")
            updatemode = 'BPUploadMode'
            fileupload = 'BPfileUpload'
            startit = 'BPUpgradeStart'
            statusname = 'BPstatus'
        else:
            updatemode = 'flash'
            # Empty tftp fields select a browser-style (form) upload source
            rsp = await wc.grab_json_response_with_status(
                '/api/maintenance/BIOSremoteSave',
                {"tftpip": "",
                 "tftpfile": ""}
            )
            fileupload = 'firmware/BIOS'
            startit = 'BIOSstart'
            statusname = 'BIOSstatus'
        hdrs = wc.stdheaders.copy()
        # The mode-change PUT carries no body
        hdrs['Content-Length'] = 0
        rsp = await wc.grab_json_response_with_status(
            '/api/maintenance/{0}'.format(updatemode),
            method='PUT', headers=hdrs)
        fu = await webclient.make_uploader(
            wc, '/api/maintenance/{0}'.format(fileupload), filename, data,
            formname='fwimage')
        while not fu.completed():
            try:
                # Wake every 3 seconds for a progress callback
                await fu.join(3)
            except asyncio.TimeoutError:
                pass
            if progress:
                progress({
                    'phase': 'upload',
                    'progress': 100 * await fu.get_progress()})
        if progress:
            progress({
                'phase': 'apply',
                'progress': 0.0}
            )
        rsp = await wc.grab_json_response_with_status(
            '/api/maintenance/{0}'.format(startit))
        applypct = 0.0
        # wRet == 0 is the BMC's success indicator for the start request
        if rsp[1] >= 200 and rsp[1] < 300 and rsp[0]['wRet'] == 0:
            updone = False
            while not updone:
                rsp = await wc.grab_json_response(
                    '/api/maintenance/{0}'.format(statusname))
                # state 9: apply finished
                if rsp.get('state', 0) == 9:
                    break
                # states 6 and 10: BMC-reported failure
                if rsp.get('state', 0) in (6, 10):
                    raise Exception('Update Failure')
                # state 8: BMC reports real progress; map it to 70-100%
                if (rsp.get('state', 0) == 8
                        and rsp.get('progress', 0) > 0 and progress):
                    progress({
                        'phase': 'apply',
                        'progress': 70 + float(rsp.get(
                            'progress', 0)) / 100 * 30})
                elif type == 'bp' and rsp.get('state', 0) == 1:
                    break
                elif progress and applypct < 70:
                    # No real progress available; synthesize a slow ramp
                    # up to 70% so callers see movement
                    applypct += 1.4
                    progress({'phase': 'apply', 'progress': applypct})
            if type == 'bp':
                rsp = await wc.grab_json_response('/api/maintenance/BPfinish')
                hdrs = wc.stdheaders.copy()
                hdrs['Content-Length'] = 0
                rsp = await wc.grab_json_response_with_status(
                    '/api/maintenance/Outofflash', method='PUT', headers=hdrs)
                return 'complete'
            # UEFI image is staged; it takes effect on the next boot
            return 'pending'
        raise Exception('Update Failure')
|
||||
|
||||
    async def update_hpm_firmware(self, filename, progress, wc, data):
        """Apply an HPM bundle (MMC component plus BMC combo image).

        Phase 1 flashes the MMC component through the hpm endpoints
        (preparecomponents / mmcfw / flash / verifyimage); phase 2 uploads
        the combo image through the regular firmware endpoints and resets
        the BMC afterwards.  Parsed HPM contents are memoized per filename
        in the module-level hpm_by_filename cache.

        :param filename: name of the .hpm file
        :param progress: optional callable receiving {'phase', 'progress'}
        :param wc: web client session connected to the BMC
        :param data: optional open file-like object with the hpm content
        :returns: 'complete' after the BMC reset has been requested
        :raises Exception: when the BMC is not ready, the bundle layout is
            unsupported, or any flashing step reports an error
        """
        rsp = await wc.grab_json_response('/api/maintenance/hpm/freemem')
        # The freemem reply doubles as a readiness probe
        if 'MemFree' not in rsp:
            raise Exception('System Not Ready for update')
        if filename not in hpm_by_filename:
            hpminfo = read_hpm(filename, data)
            # Exactly three segments are expected: mmc, combo, (trailer)
            if len(hpminfo) != 3:
                raise Exception(
                    'This HPM update is currently not supported')
            hpm_by_filename[filename] = read_hpm(filename, data)
        else:
            hpminfo = hpm_by_filename[filename]
        rsp, status = await wc.grab_json_response_with_status(
            '/api/maintenance/hpm/updatemode', method='PUT')
        # first segment, make sure it is mmc,
        # then do the preparecomponents with the following payload
        if status != 200:
            raise Exception(rsp)
        uid = rsp['unique_id']
        # Remember the update id so other methods can reference the session
        self.fwid = uid
        payload = {
            'FWUPDATEID': uid,
            'COMPONENT_ID': 1,
            'COMPONENT_DATA_LEN': len(hpminfo[0].data),
            'IS_MMC': 1,
        }
        rsp, status = await wc.grab_json_response_with_status(
            '/api/maintenance/hpm/preparecomponents', payload, method='PUT')
        if status < 200 or status >= 300:
            # Back out of update mode before surfacing the failure
            await wc.grab_json_response_with_status(
                '/api/maintenance/hpm/exitupdatemode', {'FWUPDATEID': uid},
                method='PUT')
            raise Exception(rsp)
        fu = await webclient.make_uploader(
            wc, '/api/maintenance/hpm/mmcfw', 'blob', hpminfo[0].data, 'mmc')
        if progress:
            progress({'phase': 'upload', 'progress': 0.0})
        # NOTE(review): fu.start() is not called by the other update paths;
        # confirm the uploader actually exposes/needs it
        fu.start()
        while not fu.completed():
            try:
                await fu.join(3)
            except asyncio.TimeoutError:
                pass
            if progress:
                # MMC upload is the first half of the 'upload' phase
                progress({
                    'phase': 'upload',
                    'progress': 50 * await fu.get_progress()})
        del payload['IS_MMC']
        payload['SECTION_FLASH'] = hpminfo[0].section_flash
        rsp, status = await wc.grab_json_response_with_status(
            '/api/maintenance/hpm/flash', payload, method='PUT')
        percent = 0
        while percent < 100:
            rsp, status = await wc.grab_json_response_with_status(
                '/api/maintenance/hpm/upgradestatus?COMPONENT_ID=1')
            if status < 200 or status >= 300:
                raise Exception(rsp)
            percent = rsp['PROGRESS']
            if progress:
                # MMC flash occupies the first half of the 'apply' phase
                progress({
                    'phase': 'apply',
                    'progress': .5 * percent})
            if percent < 100:
                await asyncio.sleep(3)
        if progress:
            progress({'phase': 'validating', 'progress': 0.0})
        del payload['SECTION_FLASH']
        rsp, status = await wc.grab_json_response_with_status(
            '/api/maintenance/hpm/verifyimage', payload, method='PUT')
        percent = 0
        while percent < 100:
            rsp, status = await wc.grab_json_response_with_status(
                '/api/maintenance/hpm/verifyimagestatus?COMPONENT_ID=1')
            if status < 200 or status >= 300:
                raise Exception(rsp)
            percent = rsp['PROGRESS']
            if progress:
                progress({
                    'phase': 'validating',
                    'progress': 0.5 * percent})
            if percent < 100:
                await asyncio.sleep(3)
        rsp, status = await wc.grab_json_response_with_status(
            '/api/maintenance/hpm/exitupdatemode', {'FWUPDATEID': uid},
            method='PUT')
        # Phase 2: upload the combo image via the normal firmware endpoints
        fu = await webclient.make_uploader(wc, '/api/maintenance/firmware/firmware',
                                           'blob', hpminfo[1].combo_image, 'fwimage')
        fu.start()
        while not fu.completed():
            try:
                await fu.join(3)
            except asyncio.TimeoutError:
                pass
            if progress:
                # Second half of the 'upload' phase (50-100%)
                progress({
                    'phase': 'upload',
                    'progress': 50 * await fu.get_progress() + 50})
        rsp = await wc.grab_json_response('/api/maintenance/firmware/verification')
        upgradeparms = {
            'preserve_config': 1,
            'flash_status': 1,
        }
        rsp, status = await wc.grab_json_response_with_status(
            '/api/maintenance/firmware/upgrade',
            upgradeparms, method='PUT')
        if progress:
            progress({'phase': 'apply', 'progress': 50.0})
        applied = False
        while not applied:
            rsp = await wc.grab_json_response(
                '/api/maintenance/firmware/flash-progress')
            # Progress arrives as text like '42% done'; map to 50-100%
            percent = float(rsp['progress'].split('%')[0])
            percent = percent * 0.5 + 50
            if progress:
                progress({'phase': 'apply', 'progress': percent})
            if rsp['progress'] == '100% done' and rsp['state'] == 0:
                applied = True
                break
            await asyncio.sleep(3)
        hdrs = wc.stdheaders.copy()
        hdrs['Content-Length'] = 0
        # Reset the BMC so the new firmware takes effect
        rsp, status = await wc.grab_json_response_with_status(
            '/api/maintenance/reset', method='POST', headers=hdrs)
        # The old session is invalid after reset; force a reconnect later
        self._wc = None
        return 'complete'
|
||||
|
||||
async def _detach_all_media(self, wc, slots):
|
||||
for slot in slots: # Stop all active redirections to reconfigure
|
||||
if slot['redirection_status'] != 0:
|
||||
await wc.grab_json_response(
|
||||
'/api/settings/media/remote/stop-media',
|
||||
{'image_name': slot['image_name'],
|
||||
'image_type': slot['media_type'],
|
||||
'image_index': slot['media_index']})
|
||||
|
||||
    async def detach_remote_media(self):
        """Detach every remote media redirection from this system.

        Fetches the current slot configuration and stops all active
        redirections.  When not operating on behalf of an IPMI flow, the
        generic redfish media handling is bypassed because this OEM path
        has already done the work.
        """
        wc = await self.get_wc()
        slots = await wc.grab_json_response(
            '/api/settings/media/remote/configurations')
        await self._detach_all_media(wc, slots)
        if not self.isipmi:
            # Tell the generic layer no further detach action is needed
            raise exc.BypassGenericBehavior()
|
||||
|
||||
async def _allocate_slot(self, slots, filetype, wc, server, path):
|
||||
currhdds = []
|
||||
currisos = []
|
||||
for slot in slots:
|
||||
if slot['image_name']:
|
||||
if slot['media_type'] == 1:
|
||||
currisos.append(slot['image_name'])
|
||||
elif slot['media_type'] == 4:
|
||||
currhdds.append(slot['image_name'])
|
||||
else:
|
||||
raise exc.UnsupportedFunctionality(
|
||||
'Unrecognized mounted image: ' + repr(slot))
|
||||
hddslots = len(currhdds)
|
||||
cdslots = len(currisos)
|
||||
if filetype == 1:
|
||||
cdslots += 1
|
||||
elif filetype == 4:
|
||||
hddslots += 1
|
||||
else:
|
||||
raise exc.UnsupportedFunctionality('Unknown slot type requested')
|
||||
gensettings = wc.grab_json_response('/api/settings/media/general')
|
||||
samesettings = gensettings['same_settings'] == 1
|
||||
if samesettings:
|
||||
hds = gensettings['cd_remote_server_address']
|
||||
hdp = gensettings['cd_remote_source_path'].replace('\\/', '/')
|
||||
else:
|
||||
hds = gensettings['hd_remote_server_address']
|
||||
hdp = gensettings['hd_remote_source_path'].replace('\\/', '/')
|
||||
if filetype == 1 and (currisos or (samesettings and currhdds)):
|
||||
if gensettings['cd_remote_server_address'] != server:
|
||||
raise exc.UnsupportedFunctionality(
|
||||
'Cannot mount ISO images from multiple '
|
||||
'servers at a time')
|
||||
if gensettings['cd_remote_source_path'].replace(
|
||||
'\\/', '/') != path:
|
||||
raise exc.UnsupportedFunctionality(
|
||||
'Cannot mount ISO images from different '
|
||||
'directories at a time')
|
||||
if filetype == 4 and currhdds:
|
||||
if hds != server:
|
||||
raise exc.UnsupportedFunctionality(
|
||||
'Cannot mount IMG images from multiple servers at a time')
|
||||
if hdp != path:
|
||||
raise exc.UnsupportedFunctionality(
|
||||
'Cannot mount IMG images from muliple directories at a '
|
||||
'time')
|
||||
await self._detach_all_media(wc, slots)
|
||||
if filetype == 1 or (samesettings and currhdds):
|
||||
gensettings['cd_remote_server_address'] = server
|
||||
gensettings['cd_remote_source_path'] = path
|
||||
gensettings['cd_remote_share_type'] = 'nfs'
|
||||
gensettings['mount_cd'] = 1
|
||||
elif filetype == 4:
|
||||
gensettings['same_settings'] = 0
|
||||
gensettings['hd_remote_server_address'] = server
|
||||
gensettings['hd_remote_source_path'] = path
|
||||
gensettings['hd_remote_share_type'] = 'nfs'
|
||||
gensettings['mount_hd'] = 1
|
||||
gensettings['remote_media_support'] = 1
|
||||
gensettings['cd_remote_password'] = ''
|
||||
gensettings['hd_remote_password'] = ''
|
||||
wc.grab_json_response_with_status('/api/settings/media/general',
|
||||
gensettings, method='PUT')
|
||||
# need to calibrate instances correctly
|
||||
currinfo, status = wc.grab_json_response_with_status(
|
||||
'/api/settings/media/instance')
|
||||
currinfo['num_cd'] = cdslots
|
||||
currinfo['num_hd'] = hddslots
|
||||
if currinfo['kvm_num_cd'] > cdslots:
|
||||
currinfo['kvm_num_cd'] = cdslots
|
||||
if currinfo['kvm_num_hd'] > hddslots:
|
||||
currinfo['kvm_num_hd'] = hddslots
|
||||
wc.grab_json_response_with_status(
|
||||
'/api/settings/media/instance', currinfo, method='PUT')
|
||||
images = wc.grab_json_response('/api/settings/media/remote/images')
|
||||
tries = 20
|
||||
while tries and not images:
|
||||
tries -= 1
|
||||
await asyncio.sleep(1)
|
||||
images = await wc.grab_json_response('/api/settings/media/remote/images')
|
||||
for iso in currisos:
|
||||
await self._exec_mount(iso, images, wc)
|
||||
for iso in currhdds:
|
||||
await self._exec_mount(iso, images, wc)
|
||||
|
||||
async def _exec_mount(self, name, images, wc):
|
||||
for img in images:
|
||||
if img['image_name'] == name:
|
||||
break
|
||||
else:
|
||||
raise exc.InvalidParameterValue(
|
||||
'Unable to locate image {0}'.format(name))
|
||||
await wc.grab_json_response(
|
||||
'/api/settings/media/remote/start-media',
|
||||
{'image_name': name, 'image_type': img['image_type'],
|
||||
'image_index': img['image_index']})
|
||||
|
||||
    async def upload_media(self, filename, progress=None, data=None):
        """Reject media upload: this BMC has no upload facility.

        Callers should use attach_remote_media with an nfs:// URL instead.

        :raises exc.UnsupportedFunctionality: always
        """
        raise exc.UnsupportedFunctionality(
            'Remote media upload not supported on this system')
|
||||
|
||||
async def list_media(self, fishclient=None, cache=True):
|
||||
wc = await self.get_wc()
|
||||
rsp = await wc.grab_json_response('/api/settings/media/general')
|
||||
cds = rsp['cd_remote_server_address']
|
||||
cdpath = rsp['cd_remote_source_path']
|
||||
cdproto = rsp['cd_remote_share_type']
|
||||
if rsp['same_settings'] == 1:
|
||||
hds = cds
|
||||
hdpath = cdpath
|
||||
hdproto = cdproto
|
||||
else:
|
||||
hds = rsp['hd_remote_server_address']
|
||||
hdpath = rsp['hd_remote_source_path']
|
||||
hdproto = rsp['hd_remote_share_type']
|
||||
slots = await wc.grab_json_response(
|
||||
'/api/settings/media/remote/configurations')
|
||||
for slot in slots:
|
||||
if slot['redirection_status'] == 1:
|
||||
url = None
|
||||
if slot['media_type'] == 1:
|
||||
url = '{0}://{1}{2}'.format(
|
||||
cdproto, cds, cdpath)
|
||||
elif slot['media_type'] == 4:
|
||||
url = '{0}://{1}{2}'.format(
|
||||
hdproto, hds, hdpath)
|
||||
if url:
|
||||
yield media.Media(slot['image_name'], url)
|
||||
|
||||
    async def attach_remote_media(self, url, user, password, vmurls):
        """Mount an NFS-hosted iso or img image as remote media.

        Parses the nfs:// URL into server, directory and filename, verifies
        a slot of the right type is free (reallocating slots if not), then
        starts redirection of the image.

        :param url: nfs://server/path/file.iso or .img
        :param user: unused on this platform (NFS has no credentials here)
        :param password: unused on this platform
        :param vmurls: unused; accepted for interface compatibility
        :raises exc.UnsupportedFunctionality: for non-nfs URLs, unsupported
            file extensions, or incompatible existing mounts
        """
        if not url.startswith('nfs://'):
            raise exc.UnsupportedFunctionality(
                'Only nfs:// urls are supported by this system')
        path = url.replace('nfs://', '')
        server, path = path.split('/', 1)
        path, filename = path.rsplit('/', 1)
        path = '/' + path
        # Media type is determined by file extension
        filetype = filename.rsplit('.')[-1]
        if filetype == 'iso':
            filetype = 1
        elif filetype == 'img':
            filetype = 4
        else:
            raise exc.UnsupportedFunctionality(
                'Only iso and img files supported')
        wc = await self.get_wc()
        mountslots = await wc.grab_json_response(
            '/api/settings/media/remote/configurations')
        images = await wc.grab_json_response('/api/settings/media/remote/images')
        currtypeenabled = False
        for slot in mountslots:
            if slot['image_name'] == filename:
                return  # Already mounted...
        for img in images:
            if img['image_name'] == filename:
                break
            if img['image_type'] == filetype:
                currtypeenabled = True
        else:
            # Image not visible; if other images of this type are already
            # being served, the source location cannot be changed
            if currtypeenabled:
                raise exc.UnsupportedFunctionality(
                    'This system cannot mount images '
                    'from different locations at the same time')
            img = None
        for slot in mountslots:
            if slot['media_type'] != filetype:
                continue
            if slot['redirection_status'] == 0:
                # Found an idle slot of the right type
                break
        else:
            # No free slot; grow/reconfigure the slot allocation
            await self._allocate_slot(mountslots, filetype, wc, server, path)
        images = await wc.grab_json_response('/api/settings/media/remote/images')
        await self._exec_mount(filename, images, wc)
        if not self.isipmi:
            # Tell the generic redfish layer the mount is fully handled
            raise exc.BypassGenericBehavior()
|
||||
1980
confluent_server/aiohmi/redfish/oem/lenovo/xcc.py
Normal file
1980
confluent_server/aiohmi/redfish/oem/lenovo/xcc.py
Normal file
File diff suppressed because it is too large
Load Diff
1213
confluent_server/aiohmi/redfish/oem/lenovo/xcc3.py
Normal file
1213
confluent_server/aiohmi/redfish/oem/lenovo/xcc3.py
Normal file
File diff suppressed because it is too large
Load Diff
47
confluent_server/aiohmi/redfish/oem/lookup.py
Normal file
47
confluent_server/aiohmi/redfish/oem/lookup.py
Normal file
@@ -0,0 +1,47 @@
|
||||
# Copyright 2019 Lenovo Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import aiohmi.redfish.oem.dell.main as dell
|
||||
import aiohmi.redfish.oem.generic as generic
|
||||
import aiohmi.redfish.oem.lenovo.main as lenovo
|
||||
import aiohmi.redfish.oem.ami.main as ami
|
||||
|
||||
# Maps Redfish-reported vendor/OEM identifiers to the module implementing
# that vendor's OEM behaviors; both 'AMI' and 'Ami' spellings appear in
# the wild and route to the same handler module.
OEMMAP = {
    'Lenovo': lenovo,
    'Dell': dell,
    'AMI': ami,
    'Ami': ami,
}
|
||||
|
||||
|
||||
def get_oem_handler(sysinfo, sysurl, webclient, cache, cmd, rootinfo=None):
    """Select and construct the OEM handler for a redfish endpoint.

    Lookup order: the service root Vendor, then the system's Oem keys, then
    Links/OEM keys, and finally (only when not invoked early with rootinfo)
    the manager's Oem keys.  Falls back to the generic handler when no OEM
    match is found.

    Fix: the mutable default argument ``rootinfo={}`` is replaced with a
    ``None`` sentinel; an empty dict was never mutated, so behavior is
    unchanged, but the shared-default pitfall is removed.

    :param sysinfo: system resource JSON
    :param sysurl: URL of the system resource
    :param webclient: web client session for the BMC
    :param cache: shared response cache
    :param cmd: owning redfish command object
    :param rootinfo: service root JSON during early invocation (before the
        manager/bmc info is available); empty/None otherwise
    """
    if rootinfo is None:
        rootinfo = {}
    vendor = rootinfo.get('Vendor', None)
    if vendor in OEMMAP:
        return OEMMAP[vendor].get_handler(sysinfo, sysurl,
                                          webclient, cache, cmd, rootinfo)
    for oem in sysinfo.get('Oem', {}):
        if oem in OEMMAP:
            return OEMMAP[oem].get_handler(sysinfo, sysurl, webclient, cache,
                                           cmd, rootinfo)
    for oem in sysinfo.get('Links', {}).get('OEM', []):
        if oem in OEMMAP:
            return OEMMAP[oem].get_handler(sysinfo, sysurl, webclient, cache,
                                           cmd, rootinfo)
    if rootinfo:  # rootinfo indicates early invocation, bmcinfo not ready yet
        return generic.OEMHandler(sysinfo, sysurl, webclient, cache, cmd._gpool, rootinfo)
    bmcinfo = cmd.bmcinfo
    for oem in bmcinfo.get('Oem', {}):
        if oem in OEMMAP:
            return OEMMAP[oem].get_handler(sysinfo, sysurl, webclient, cache,
                                           cmd, rootinfo)
    return generic.OEMHandler(sysinfo, sysurl, webclient, cache, cmd._gpool, rootinfo)
|
||||
113
confluent_server/aiohmi/storage.py
Normal file
113
confluent_server/aiohmi/storage.py
Normal file
@@ -0,0 +1,113 @@
|
||||
# Copyright 2017 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class Disk(object):
    """A single physical drive as described by a storage controller."""

    def __init__(self, name, description=None, id=None, status=None,
                 serial=None, fru=None, stripsize=None):
        """Record the attributes of one disk.

        :param name: Human readable name for the disk (stored as str)
        :param description: Free-form description of the device
        :param id: Identifier the controller uses for this disk
        :param status: Disk status as reported by the controller
        :param serial: Serial number of the drive
        :param fru: FRU number of the drive
        :param stripsize: Strip size of the disk in kibibytes
        """
        # The name is always kept as text, even when a number is given
        self.name = str(name)
        for attrname, value in (('description', description), ('id', id),
                                ('status', status), ('serial', serial),
                                ('fru', fru), ('stripsize', stripsize)):
            setattr(self, attrname, value)
|
||||
|
||||
|
||||
class Array(object):
    """A group of disks arranged into a RAID array."""

    def __init__(self, disks=None, raid=None, status=None, volumes=(), id=None,
                 spans=None, hotspares=(), capacity=None,
                 available_capacity=None):
        """Record the attributes of one array.

        :param disks: Disk objects that are members of the array
        :param raid: the RAID level
        :param status: Array status according to the controller
        :param volumes: Volume objects carved out of the array
        :param id: Unique identifier the controller uses for the array
        :param spans: Number of spans for a multi-dimensional array
        :param hotspares: Disk objects dedicated as hot spares to this array
        :param capacity: total capacity of the array
        :param available_capacity: remaining (unallocated) capacity
        """
        for attrname, value in (
                ('disks', disks), ('raid', raid), ('status', status),
                ('id', id), ('volumes', volumes), ('spans', spans),
                ('hotspares', hotspares), ('capacity', capacity),
                ('available_capacity', available_capacity)):
            setattr(self, attrname, value)
|
||||
|
||||
|
||||
class Volume(object):
    """A logical volume presented by a storage controller."""

    def __init__(self, name=None, size=None, status=None, id=None,
                 stripsize=None, read_policy=None, write_policy=None,
                 default_init=None):
        """Define a Volume as an object

        :param name: Name of the volume
        :param size: Size of the volume in MB; strings suffixed 'mb', 'gb'
            or 'tb' (decimal units) are normalized to an integer MB count,
            and fractional sizes such as '2.5gb' are now accepted
        :param status: Controller indicated status of the volume
        :param id: Controller identifier of a given volume
        :param stripsize: The stripsize of the volume in kibibytes
        :param read_policy: The read policy of the volume
        :param write_policy: The write policy of the volume
        :param default_init: The default initialization of the volume
        """
        self.name = name
        if isinstance(size, int):
            self.size = size
        else:
            # Normalize unit-suffixed strings to integer decimal megabytes;
            # anything unrecognized passes through unchanged.
            strsize = str(size).lower()
            for suffix, scale in (('mb', 1), ('gb', 1000),
                                  ('tb', 1000 * 1000)):
                if strsize.endswith(suffix):
                    try:
                        # float() also accepts fractional sizes like
                        # '2.5gb', which the previous int()-only parse
                        # rejected with an unhandled ValueError.
                        self.size = int(float(strsize[:-2]) * scale)
                    except ValueError:
                        self.size = size
                    break
            else:
                self.size = size
        self.status = status
        self.id = id
        self.stripsize = stripsize
        self.read_policy = read_policy
        self.write_policy = write_policy
        self.default_init = default_init
|
||||
|
||||
|
||||
class ConfigSpec(object):
    """A storage configuration description.

    When read back from a remote system this conveys the current layout.
    When submitted to a remote system it should carry only the delta to be
    applied against the current layout.
    """

    def __init__(self, disks=(), arrays=()):
        """Capture the configuration pieces.

        :param disks: Disk objects not participating in any array
        :param arrays: Array objects in the configuration
        """
        self.arrays = arrays
        self.disks = disks
|
||||
0
confluent_server/aiohmi/tests/__init__.py
Normal file
0
confluent_server/aiohmi/tests/__init__.py
Normal file
0
confluent_server/aiohmi/tests/unit/__init__.py
Normal file
0
confluent_server/aiohmi/tests/unit/__init__.py
Normal file
21
confluent_server/aiohmi/tests/unit/base.py
Normal file
21
confluent_server/aiohmi/tests/unit/base.py
Normal file
@@ -0,0 +1,21 @@
|
||||
# Copyright 2017 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslotest import base
|
||||
|
||||
|
||||
# Shared base class so project-wide fixtures/behaviors can later be added
# in one place; derives from oslotest's BaseTestCase.
class TestCase(base.BaseTestCase):

    """Test case base class for all unit tests."""
|
||||
0
confluent_server/aiohmi/tests/unit/ipmi/__init__.py
Normal file
0
confluent_server/aiohmi/tests/unit/ipmi/__init__.py
Normal file
23
confluent_server/aiohmi/tests/unit/ipmi/test_sdr.py
Normal file
23
confluent_server/aiohmi/tests/unit/ipmi/test_sdr.py
Normal file
@@ -0,0 +1,23 @@
|
||||
# Copyright 2017 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from aiohmi.ipmi import sdr
|
||||
from aiohmi.tests.unit import base
|
||||
|
||||
|
||||
class SDRTestCase(base.TestCase):
    """Unit tests for aiohmi.ipmi.sdr helpers."""

    def test_ones_complement(self):
        # 127 is non-negative in 8-bit ones' complement, so it decodes
        # to itself unchanged.
        self.assertEqual(sdr.ones_complement(127, 8), 127)
|
||||
1
confluent_server/aiohmi/util/__init__.py
Normal file
1
confluent_server/aiohmi/util/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
__author__ = 'jjohnson2'
|
||||
66
confluent_server/aiohmi/util/parse.py
Normal file
66
confluent_server/aiohmi/util/parse.py
Normal file
@@ -0,0 +1,66 @@
|
||||
# Copyright 2019 Lenovo Corporation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from datetime import datetime
|
||||
from datetime import timedelta
|
||||
|
||||
from dateutil import tz
|
||||
|
||||
|
||||
def parse_time(timeval):
    """Parse a BMC/Redfish style timestamp string into a datetime.

    Accepts ISO-8601 style stamps ending in 'Z', stamps with an explicit
    +/-HH:MM offset, zoneless stamps, and bare '%Y-%m-%d' or '%m/%d/%Y'
    dates.  Returns None for None input or any unrecognized format.

    Fix: a zoneless '%Y-%m-%dT%H:%M:%S' input previously reached the
    tz-offset replace with an unassigned offset variable, raising
    UnboundLocalError (which 'except ValueError' did not catch); such
    inputs now return a naive datetime.

    :param timeval: the timestamp string to parse (or None)
    :returns: a datetime (zone-aware when the input carried zone info,
        naive otherwise), or None when unparseable
    """
    if timeval is None:
        return None
    try:
        # Plain UTC stamp, e.g. 2020-01-02T03:04:05Z
        if '+' not in timeval and len(timeval.split('-')) <= 3:
            retval = datetime.strptime(timeval, '%Y-%m-%dT%H:%M:%SZ')
            return retval.replace(tzinfo=tz.tzutc())
    except ValueError:
        pass
    try:
        positive = None
        offset = None
        if '+' in timeval:
            timeval, offset = timeval.split('+', 1)
            positive = 1
        elif len(timeval.split('-')) > 3:
            # A fourth '-' means a trailing negative UTC offset
            timeval, offset = timeval.rsplit('-', 1)
            positive = -1
        secs = 0
        if positive:
            hrs, mins = offset.split(':', 1)
            secs = int(hrs) * 60 + int(mins)
            secs = secs * 60 * positive
        ms = None
        if '.' in timeval:
            # NOTE(review): the fractional part is treated as an integer
            # count of milliseconds regardless of digit count -- confirm
            # this matches the producers of these stamps
            timeval, ms = timeval.split('.', 1)
            ms = int(ms)
            ms = timedelta(0, 0, 0, ms)
        retval = datetime.strptime(timeval, '%Y-%m-%dT%H:%M:%S')
        if ms:
            retval += ms
        if positive:
            return retval.replace(tzinfo=tz.tzoffset('', secs))
        # No offset was present; return the naive datetime (previously
        # this path crashed with UnboundLocalError)
        return retval
    except ValueError:
        pass
    try:
        return datetime.strptime(timeval, '%Y-%m-%dT%H:%M:%S')
    except ValueError:
        pass
    try:
        return datetime.strptime(timeval, '%Y-%m-%d')
    except ValueError:
        pass
    try:
        return datetime.strptime(timeval, '%m/%d/%Y')
    except ValueError:
        pass
    return None
|
||||
441
confluent_server/aiohmi/util/webclient.py
Normal file
441
confluent_server/aiohmi/util/webclient.py
Normal file
@@ -0,0 +1,441 @@
|
||||
# Copyright 2015-2019 Lenovo
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# This provides ability to do HTTPS in a manner like ssh host keys for the
|
||||
# sake of typical internal management devices. Compatibility back to python
|
||||
# 2.6 as is found in commonly used enterprise linux distributions.
|
||||
|
||||
import asyncio
|
||||
import base64
|
||||
import copy
|
||||
import gzip
|
||||
import io
|
||||
import json
|
||||
import os
|
||||
import socket
|
||||
import ssl
|
||||
import threading
|
||||
import traceback
|
||||
|
||||
from yarl import URL
|
||||
import aiohmi.exceptions as pygexc
|
||||
|
||||
|
||||
import aiohttp
|
||||
from aiohttp.cookiejar import CookieJar
|
||||
|
||||
import http.client as httplib
|
||||
import http.cookies as Cookie
|
||||
|
||||
# Used as the separator for form data

# We will frequently be dealing with the same data across many instances,
# consolidate forms to single memory location to get benefits..
# Maps filename -> cached multipart form tuple so repeated uploads of the
# same file share one in-memory encoded form.
uploadforms = {}
|
||||
|
||||
class CustomVerifier(aiohttp.Fingerprint):
    """Certificate pinning hook in the shape aiohttp expects.

    Delegates certificate acceptance to a caller-supplied callback (ssh
    host-key style trust) instead of fingerprint comparison.

    NOTE(review): aiohttp.Fingerprint.__init__ is deliberately not called
    (no digest is involved) -- confirm no aiohttp code path reads the
    attributes that initializer would normally set.
    """

    def __init__(self, verifycallback):
        # Callback receives the peer's DER certificate bytes and returns
        # truthy when the certificate should be trusted.
        self._certverify = verifycallback

    def check(self, transport):
        """Validate the peer certificate of an established transport."""
        sslobj = transport.get_extra_info("ssl_object")
        cert = sslobj.getpeercert(binary_form=True)
        try:
            if not self._certverify(cert):
                raise pygexc.UnrecognizedCertificate('Unknown certificate',
                                                    cert)
        except Exception:
            # Never leave a connection open once verification has failed
            transport.close()
            raise
|
||||
|
||||
class Downloader:
    """Tracks an in-flight download: progress, completion, and its task."""

    @classmethod
    def create(cls, filehandle):
        """Build a tracker writing into the given file-like object."""
        inst = cls()
        inst.contentlen = None
        inst._completed = False
        inst._filehandle = filehandle
        inst._xfertask = None
        inst.exc = None
        return inst

    async def get_progress(self):
        """Return progress as a 0.0-1.0 fraction, or -0.5 when unknown."""
        if self.contentlen is None:
            # Total size not yet learned from the server
            return -0.5
        remaining = 100
        while True:
            if self._completed:
                return 1.0
            try:
                position = self._filehandle.tell()
            except ValueError:
                # File handle momentarily unavailable (e.g. being closed);
                # retry briefly before giving up
                remaining -= 1
                if remaining <= 0:
                    return -0.5
                await asyncio.sleep(0.01)
                continue
            return float(position) / float(self.contentlen)

    def mark_completed(self, fut):
        """Done-callback for the transfer task."""
        self._completed = True

    def set_task(self, task):
        """Attach the asyncio task performing the transfer."""
        self._xfertask = task

    def completed(self):
        """Whether the transfer task has finished."""
        return self._completed

    async def join(self, timeout=None):
        """Wait for the transfer, optionally bounded by a timeout."""
        task = self._xfertask
        if task is None:
            return
        if timeout is None:
            await task
            return
        # shield() keeps a timeout from cancelling the underlying transfer
        await asyncio.wait_for(asyncio.shield(task), timeout=timeout)
|
||||
|
||||
class Uploader(Downloader):
    """Tracks an in-flight upload and holds its payload and response.

    Built through the async ``create`` classmethod (rather than __init__)
    so form construction and blocking file reads can be awaited.
    """

    @classmethod
    async def create(cls, filename, data=None, formname=None,
                     otherfields=(), formwrap=True):
        """Prepare the upload payload and headers.

        :param filename: file name of the payload; also the key into the
            shared multipart form cache
        :param data: optional already-open file-like source; when absent
            the file named by ``filename`` is opened for reading
        :param formname: form field name presenting the file
        :param otherfields: additional form fields to include
        :param formwrap: True to wrap the payload as multipart/form-data,
            False to stream raw bytes as application/octet-stream
        """
        self = cls()
        self._response = None
        self._statuscode = None
        self._xfertask = None
        self._completed = False
        self._rspheaders = None
        self.rsp = ''
        # Pessimistic default until the server actually responds
        self.rspstatus = 500
        self.filename = filename
        if data:
            self.data = data
        else:
            self.data = open(filename, 'rb')
        self.formname = formname
        self.otherfields = otherfields
        self.ulheaders = {}
        if formwrap:
            # NOTE(review): get_upload_form appears to also populate the
            # module-level `uploadforms` cache, which the size below reads
            # back -- confirm against its definition elsewhere in the file
            guf = await get_upload_form(
                filename, self.data, formname, otherfields)
            self._upbuffer = io.BytesIO(guf[0])
            self._boundary = guf[1]
            self.ulsize = len(uploadforms[filename][0])
            self.ulheaders['Content-Type'] = 'multipart/form-data; boundary={0}'.format(
                self._boundary.decode('utf-8'))
            self.ulheaders['Content-Size'] = str(self.ulsize)
        else:
            canseek = True
            try:
                curroff = self.data.tell()
            except Exception:
                # Source is not seekable; read everything into memory in a
                # worker thread to avoid blocking the event loop
                canseek = False
                databytes = await asyncio.to_thread(self.data.read)
                self.ulsize = len(databytes)
                self._upbuffer = io.BytesIO(databytes)
            if canseek:
                # Derive the size by seeking to the end, then restore the
                # original position and stream directly from the source
                self.data.seek(0, 2)
                self.ulsize = self.data.tell() - curroff
                self.data.seek(curroff, 0)
                self._upbuffer = self.data
            self.ulheaders['Content-Length'] = str(self.ulsize)
            self.ulheaders['Content-Type'] = 'application/octet-stream'
        return self

    def set_response(self, statuscode, response, headers):
        # Called by the transfer task once the server replies
        self._statuscode = statuscode
        self._response = response
        self._rspheaders = headers

    def get_response(self):
        """Return (statuscode, response body, response headers)."""
        return self._statuscode, self._response, self._rspheaders

    def get_buffer(self):
        """Return the file-like object the transfer reads from."""
        return self._upbuffer

    def get_headers(self):
        """Return the headers to send with the upload request."""
        return self.ulheaders

    def get_size(self):
        """Return the total payload size in bytes."""
        return self.ulsize

    def close(self):
        """Drop the cached form for this filename and close the source."""
        if self.filename in uploadforms:
            try:
                del uploadforms[self.filename]
            except KeyError:
                pass
        try:
            self.data.close()
        except Exception:
            pass

    async def get_progress(self):
        """Return upload progress as a 0.0-1.0 fraction.

        Returns 0.0 before the transfer task is attached and -0.5 when
        progress cannot be determined.
        """
        if self._completed:
            return 1.0
        if self._xfertask is None:
            return 0.0

        totalen = self.get_size()
        if totalen is None:
            return -0.5
        offset = None
        tries = 100
        while offset is None:
            if self._completed:
                return 1.0
            if self._xfertask is None:
                return 0.0
            try:
                offset = self._upbuffer.tell()
            except ValueError:
                # Buffer momentarily unavailable; retry briefly
                await asyncio.sleep(0.01)
                tries -= 1
                if tries <= 0:
                    return -0.5
        return float(offset) / float(totalen)
|
||||
|
||||
def make_downloader(webconn, url, dlfile):
    """Start a background download and return its Downloader tracker.

    :param webconn: WebConnection instance to fetch through.
    :param url: URL path to download.
    :param dlfile: Destination filename or writable file object.
    :returns: Downloader whose transfer task has been scheduled.

    Must be called with a running event loop, since asyncio.create_task
    is used to schedule the transfer.

    NOTE(review): Downloader.create is called without await here, while
    the sibling Uploader.create is a coroutine -- confirm that
    Downloader.create is synchronous, otherwise set_task would be
    invoked on a coroutine object.
    """
    if isinstance(dlfile, str):
        dlfile = open(dlfile, 'wb')
    dler = Downloader.create(dlfile)
    tsk = asyncio.create_task(webconn.download(url, dlfile, dler))
    dler.set_task(tsk)
    tsk.add_done_callback(dler.mark_completed)
    return dler
|
||||
|
||||
async def make_uploader(webconn, url, filename, data=None, formname=None,
                        otherfields=(), formwrap=True):
    """Prepare an Uploader and start the upload as a background task.

    :param webconn: WebConnection instance to send through.
    :param url: URL path to post the upload to.
    :param filename: Name of the file to upload.
    :param data: Optional payload overriding the on-disk file contents.
    :param formname: Optional form field name for the file part.
    :param otherfields: Extra form fields for multipart wrapping.
    :param formwrap: Whether to wrap the payload as multipart/form-data.
    :returns: The Uploader tracking the scheduled transfer.
    """
    uploader = await Uploader.create(
        filename, data, formname, otherfields, formwrap)
    upload_task = asyncio.create_task(
        webconn.upload(url, filename, uploader.get_buffer(),
                       uploader=uploader))
    uploader.set_task(upload_task)
    upload_task.add_done_callback(uploader.mark_completed)
    return uploader
|
||||
|
||||
|
||||
|
||||
|
||||
async def get_upload_form(filename, data, formname, otherfields, boundary=None):
    """Return the cached (form_bytes, boundary) multipart payload for filename.

    If another coroutine is already generating the form for this
    filename, wait for it; otherwise mark it 'pending' and build it in a
    worker thread via assign_upload_form.

    :param filename: Cache key and source of the default form field name.
    :param data: File-like object or bytes providing the file contents.
    :param formname: Form field name; defaults to the basename of filename.
    :param otherfields: Mapping of extra form fields to include.
    :param boundary: Optional multipart boundary; generated when absent.
    :returns: Tuple of (form body bytes, boundary bytes).
    """
    if not boundary:
        boundary = base64.urlsafe_b64encode(os.urandom(54))[:66]
    ffilename = filename.split('/')[-1]
    if not formname:
        formname = ffilename
    # Another coroutine may already be generating this form; wait for it.
    while uploadforms.get(filename, None) == 'pending':
        await asyncio.sleep(0.1)
    try:
        return uploadforms[filename]
    except KeyError:
        uploadforms[filename] = 'pending'
    try:
        try:
            # Read file-like sources in a worker thread; bytes pass through.
            data = await asyncio.to_thread(data.read)
        except AttributeError:
            pass
        return await asyncio.to_thread(
            assign_upload_form, filename, ffilename, data, formname,
            otherfields, boundary)
    except Exception:
        # Fix: on failure, clear the 'pending' sentinel so concurrent
        # callers are not stranded spinning on it forever.
        if uploadforms.get(filename, None) == 'pending':
            del uploadforms[filename]
        raise
|
||||
|
||||
def assign_upload_form(filename, ffilename, data, formname, otherfields, boundary=None):
    """Synchronously build and cache a multipart/form-data payload.

    Dict-valued extra fields are serialized as JSON parts; all other
    fields are formatted as plain form-data.  The result is stored in
    the module-level uploadforms cache keyed by filename.

    :param filename: Cache key for the generated form.
    :param ffilename: Filename advertised in the file part's disposition.
    :param data: Raw bytes of the file contents.
    :param formname: Form field name for the file part.
    :param otherfields: Mapping of additional form fields.
    :param boundary: Multipart boundary bytes; generated when omitted.
    :returns: Tuple of (form body bytes, boundary bytes).
    """
    if boundary is None:
        # Fix: the previous default of None crashed on b'--' + boundary;
        # generate a boundary so direct callers are safe.
        boundary = base64.urlsafe_b64encode(os.urandom(54))[:66]
    form = b''
    for ofield in otherfields:
        tfield = otherfields[ofield]
        xtra = ''
        if isinstance(tfield, dict):
            tfield = json.dumps(tfield)
            xtra = '\r\nContent-Type: application/json'
        form += (b'--' + boundary
                 + '\r\nContent-Disposition: form-data; '
                   'name="{0}"{1}\r\n\r\n{2}\r\n'.format(
                       ofield, xtra, tfield).encode('utf-8'))
    form += (b'--' + boundary
             + '\r\nContent-Disposition: form-data; '
               'name="{0}"; filename="{1}"\r\n'.format(
                   formname, ffilename).encode('utf-8'))
    form += b'Content-Type: application/octet-stream\r\n\r\n' + data
    form += b'\r\n--' + boundary + b'--\r\n'
    uploadforms[filename] = form, boundary
    return uploadforms[filename]
|
||||
|
||||
|
||||
class WebConnection:
    """Minimal aiohttp-based HTTPS client for BMC-style web services.

    Holds standing headers, a permissive cookie jar, an optional custom
    TLS certificate verifier, and a default timeout so that a series of
    requests to the same management controller share session state.
    """

    def __init__(self, host, port, verifycallback=None, timeout=None):
        """Prepare connection state; no network activity happens here.

        :param host: Hostname or IP; bare IPv6 literals are bracketed.
        :param port: TCP port for HTTPS.
        :param verifycallback: Optional callback for peer certificate
            verification (wrapped in CustomVerifier).
        :param timeout: A number of seconds (total) or an
            aiohttp.ClientTimeout instance.
        """
        self.port = port
        # Keep the caller-supplied host verbatim for reference
        self.thehost = host
        # Bracket bare IPv6 literals for use in URLs
        if ':' in host and '[' not in host:
            self.host = f'[{host}]'
        else:
            self.host = host
        if verifycallback:
            self.ssl = CustomVerifier(verifycallback)
        else:
            self.ssl = None
        self.verifycallback = verifycallback
        if isinstance(timeout, (int, float)):
            self.timeout = aiohttp.ClientTimeout(total=timeout)
        else:
            self.timeout = timeout
        self.stdheaders = {}
        # For scoped (link-local) addresses, send a Host header without
        # the zone index, which the peer would not recognize
        if '[' not in host and '%' in host:
            self.stdheaders['Host'] = '[' + host.split('%', 1)[0] + ']'
        # unsafe=True permits cookies from IP-address hosts (typical BMCs)
        self.cookies = CookieJar(quote_cookie=False, unsafe=True)

    def set_timeout(self, timeout):
        """Replace the default timeout (seconds or ClientTimeout)."""
        if isinstance(timeout, (int, float)):
            self.timeout = aiohttp.ClientTimeout(total=timeout)
        else:
            self.timeout = timeout

    def get_timeout(self):
        """Return the current default timeout object."""
        return self.timeout

    def set_header(self, key, value):
        """Set a header sent with every subsequent request."""
        self.stdheaders[key] = value

    def dupe(self, timeout=None):
        """Clone this connection, copying headers and cookie values.

        NOTE(review): only cookie name/value pairs are transferred;
        attributes such as path and expiry are not carried over --
        confirm this is acceptable for the target services.
        """
        newwc = WebConnection(self.host, self.port,
            verifycallback=self.verifycallback, timeout=timeout or self.timeout)
        newwc.stdheaders = self.stdheaders.copy()
        newwc.cookies = CookieJar(quote_cookie=False, unsafe=True)
        for cookie in self.cookies:
            newwc.cookies.update_cookies(
                {cookie.key: cookie.value}, response_url=URL(f'https://{self.host}:{self.port}/'))
        return newwc

    async def request(
            self, method, url, body=None, headers=None, referer=None):
        """Issue a request for its side effects (e.g. session cookies).

        NOTE(review): the response is entered and immediately discarded;
        nothing is returned.  Callers needing response data should use
        the grab_* helpers instead -- confirm this is intentional.
        """
        if headers is None:
            headers = self.stdheaders.copy()
        else:
            headers = headers.copy()
        # GET requests carry no body, so a Content-Type would be misleading
        if method == 'GET' and 'Content-Type' in headers:
            del headers['Content-Type']
        if method == 'POST' and body and 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/x-www-form-urlencoded'
        if body and 'Content-Length' not in headers:
            # NOTE(review): header value is an int here; HTTP header
            # values are conventionally str -- confirm aiohttp tolerates it
            headers['Content-Length'] = len(body)
        if referer:
            headers['referer'] = referer
        method = method.lower()
        async with aiohttp.ClientSession(
                f'https://{self.host}:{self.port}', cookie_jar=self.cookies, timeout=self.timeout) as session:
            # Dispatch to session.get/post/... by method name
            thefunc = getattr(session, method)
            kwargs = {}
            if isinstance(body, dict):
                kwargs['json'] = body
            elif body:
                kwargs['data'] = body
            async with thefunc(url, headers=headers, ssl=self.ssl, **kwargs) as rsp:
                pass

    def set_basic_credentials(self, username, password):
        """Install an HTTP Basic Authorization header for all requests."""
        if isinstance(username, bytes) and not isinstance(username, str):
            username = username.decode('utf-8')
        if isinstance(password, bytes) and not isinstance(password, str):
            password = password.decode('utf-8')
        authinfo = ':'.join((username, password))
        if not isinstance(authinfo, bytes):
            authinfo = authinfo.encode('utf-8')
        authinfo = base64.b64encode(authinfo)
        if not isinstance(authinfo, str):
            authinfo = authinfo.decode('utf-8')
        self.stdheaders['Authorization'] = 'Basic {0}'.format(authinfo)

    async def grab_json_response(self, url, data=None, referer=None, headers=None):
        """Fetch url and return decoded JSON, or {} on non-200 status.

        On failure the error body is stashed in self.lastjsonerror.
        """
        self.lastjsonerror = None
        body, status = await self.grab_json_response_with_status(
            url, data, referer, headers)
        if status == 200:
            return body
        self.lastjsonerror = body
        return {}

    async def grab_json_response_with_status(self, url, data=None, referer=None,
                                             headers=None, method=None):
        """Like grab_json_response, but returns (body, statuscode)."""
        rsp, status, hdrs = await self.grab_response_with_status(url, data, referer, headers, method, expect_type='json')
        return rsp, status

    async def grab_response_with_status(self, url, data=None, referer=None,
                                        headers=None, method=None, expect_type=None):
        """Perform a request and return (body, status, headers).

        :param expect_type: 'json' to decode JSON, 'text' for str;
            anything else returns raw bytes.  Non-2xx responses always
            return raw bytes.
        """
        if not headers:
            headers = self.stdheaders.copy()
        else:
            headers = headers.copy()
        if referer:
            headers['referer'] = referer
        if not method:
            # Default to POST when a payload is present, GET otherwise
            method = 'POST' if data is not None else 'GET'
        method = method.lower()
        if 'Content-Type' in headers and method.lower() in ('get', 'delete'):
            del headers['Content-Type']
        async with aiohttp.ClientSession(f'https://{self.host}:{self.port}', cookie_jar=self.cookies, timeout=self.timeout) as session:
            thefunc = getattr(session, method)
            kwargs = {}
            if isinstance(data, dict):
                kwargs['json'] = data
                if 'Content-Type' not in headers:
                    headers['Content-Type'] = 'application/json'
            elif data is not None:
                kwargs['data'] = data
            async with thefunc(url, headers=headers, ssl=self.ssl, **kwargs) as rsp:
                if rsp.status >= 200 and rsp.status < 300:
                    if expect_type == 'json':
                        # content_type='' disables aiohttp's strict
                        # content-type check, common with BMC firmware
                        return await rsp.json(content_type=''), rsp.status, rsp.headers
                    elif expect_type == 'text':
                        return await rsp.text(), rsp.status, rsp.headers
                    else:
                        return await rsp.read(), rsp.status, rsp.headers
                else:
                    return await rsp.read(), rsp.status, rsp.headers

    async def download(self, url, dlfile, downloader=None):
        """Download a file to filename or file object

        The target is fetched with GET and written in 16 KiB chunks;
        the file object is closed when the transfer finishes.  When a
        downloader tracker is given, its contentlen attribute is set
        from the response Content-Length (None when absent or invalid).
        """
        if isinstance(dlfile, str):
            dlfile = open(dlfile, 'wb')
        dlheaders = self.stdheaders.copy()
        # Disable compression negotiation so byte counts match the
        # advertised Content-Length
        if 'Accept-Encoding' in dlheaders:
            del dlheaders['Accept-Encoding']
        async with aiohttp.ClientSession(f'https://{self.host}:{self.port}', cookie_jar=self.cookies, timeout=self.timeout) as session:
            async with session.get(url, headers=dlheaders, ssl=self.ssl) as rsp:
                if downloader:
                    downloader.contentlen = rsp.headers.get('content-length', None)
                    try:
                        downloader.contentlen = int(downloader.contentlen)
                    except Exception:
                        downloader.contentlen = None
                async for chunk in rsp.content.iter_chunked(16384):
                    dlfile.write(chunk)
        dlfile.close()

    async def upload(self, url, ulfile, data=None, uploader=None):
        """POST an upload prepared by an Uploader; return the status code.

        The uploader supplies the headers and the payload buffer; its
        response state is populated from the server reply (decoded as
        JSON or text when the Content-Type suggests it) and it is
        closed when the transfer completes.  Raises when no uploader is
        supplied, since the raw ulfile/data path is not implemented.
        """
        upheaders = self.stdheaders.copy()
        if uploader:
            upheaders.update(uploader.get_headers())
            data = uploader.get_buffer()
        else:
            raise Exception("Not implemented without uploader handler")
        async with aiohttp.ClientSession(f'https://{self.host}:{self.port}', cookie_jar=self.cookies, timeout=self.timeout) as session:
            async with session.post(url, headers=upheaders, ssl=self.ssl, data=data) as rsp:
                if rsp.status >= 200 and rsp.status < 300:
                    # Decode by advertised content type for convenience
                    expect_type = rsp.headers.get('Content-Type', '')
                    if 'json' in expect_type:
                        uploader.set_response(rsp.status, await rsp.json(content_type=''), rsp.headers)
                    elif 'text' in expect_type:
                        uploader.set_response(rsp.status, await rsp.text(), rsp.headers)
                    else:
                        uploader.set_response(rsp.status, await rsp.read(), rsp.headers)
                else:
                    uploader.set_response(rsp.status, await rsp.read(), rsp.headers)
        if uploader:
            uploader.close()
        return uploader._statuscode
|
||||
|
||||
18
confluent_server/aiohmi/version.py
Normal file
18
confluent_server/aiohmi/version.py
Normal file
@@ -0,0 +1,18 @@
|
||||
# Copyright 2017 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pbr.version
|
||||
|
||||
version_info = pbr.version.VersionInfo('aiohmi')
|
||||
@@ -21,7 +21,7 @@ Requires: confluent_vtbufferd
|
||||
Requires: python3-asyncssh, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-dns, python3-webauthn, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
|
||||
%else
|
||||
%if "%{dist}" == ".el10"
|
||||
Requires: python3-asyncssh, confluent_client == %{version}, python3-pyparsing, python3-dns, python3-psutil, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml,python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute python3-yarl python3-aiohmi python3-aiohttp
|
||||
Requires: python3-asyncssh, confluent_client == %{version}, python3-pyparsing, python3-dns, python3-psutil, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml,python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute python3-yarl python3-aiohttp
|
||||
%endif
|
||||
%endif
|
||||
|
||||
@@ -58,10 +58,6 @@ rmdir $RPM_BUILD_ROOT/etc
|
||||
systemd-analyze verify $RPM_BUILD_ROOT/usr/lib/systemd/system/confluent.service 2>&1 | grep "'AmbientCapabilities'" > /dev/null && sed -e 's/User=.*//' -e 's/Group=.*//' -e 's/AmbientCapabilities=.*//' -i $RPM_BUILD_ROOT/usr/lib/systemd/system/confluent.service
|
||||
cat INSTALLED_FILES
|
||||
|
||||
%triggerin -- python-aiohmi, python3-aiohmi
|
||||
if [ -x /usr/bin/systemctl ]; then /usr/bin/systemctl try-restart confluent >& /dev/null; fi
|
||||
true
|
||||
|
||||
%pre
|
||||
getent group confluent > /dev/null || /usr/sbin/groupadd -r confluent
|
||||
getent passwd confluent > /dev/null || /usr/sbin/useradd -r -g confluent -d /var/lib/confluent -s /sbin/nologin confluent
|
||||
|
||||
@@ -22,7 +22,19 @@ setup(
|
||||
'confluent/plugins/info/',
|
||||
'confluent/plugins/shell/',
|
||||
'confluent/collective/',
|
||||
'confluent/plugins/configuration/'],
|
||||
'confluent/plugins/configuration/',
|
||||
'aiohmi',
|
||||
'aiohmi/cmd',
|
||||
'aiohmi/ipmi',
|
||||
'aiohmi/ipmi/oem',
|
||||
'aiohmi/ipmi/oem/lenovo',
|
||||
'aiohmi/ipmi/private',
|
||||
'aiohmi/redfish',
|
||||
'aiohmi/redfish/oem',
|
||||
'aiohmi/redfish/oem/dell',
|
||||
'aiohmi/redfish/oem/lenovo',
|
||||
'aiohmi/redfish/oem/ami',
|
||||
'aiohmi/util'],
|
||||
scripts=['bin/confluent', 'bin/confluent_selfcheck', 'bin/confluentdbutil', 'bin/collective', 'bin/osdeploy'],
|
||||
data_files=[('/etc/init.d', ['sysvinit/confluent']),
|
||||
('/usr/lib/sysctl.d', ['sysctl/confluent.conf']),
|
||||
|
||||
Reference in New Issue
Block a user