diff --git a/confluent_server/confluent/config/attributes.py b/confluent_server/confluent/config/attributes.py
index a6ce0e96..bd08b39c 100644
--- a/confluent_server/confluent/config/attributes.py
+++ b/confluent_server/confluent/config/attributes.py
@@ -369,7 +369,7 @@ node = {
                         'the managed node. If not specified, then console '
                         'is disabled. "ipmi" should be specified for most '
                         'systems if console is desired.'),
-        'validvalues': ('ssh', 'ipmi', 'openbmc', 'tsmsol'),
+        'validvalues': ('ssh', 'ipmi', 'openbmc', 'tsmsol', 'vcenter'),
     },
     # 'virtualization.host': {
     #     'description': ('Hypervisor where this node does/should reside'),
diff --git a/confluent_server/confluent/plugins/hardwaremanagement/vcenter.py b/confluent_server/confluent/plugins/hardwaremanagement/vcenter.py
new file mode 100644
index 00000000..8dc6aa60
--- /dev/null
+++ b/confluent_server/confluent/plugins/hardwaremanagement/vcenter.py
@@ -0,0 +1,315 @@
+
+import codecs
+import confluent.util as util
+import confluent.messages as msg
+import eventlet
+import json
+import struct
+webclient = eventlet.import_patched('pyghmi.util.webclient')
+import eventlet.green.socket as socket
+import confluent.interface.console as conapi
+
+
+def fixuuid(baduuid):
+    # VMware presents the first three UUID fields byte-swapped in BIOS,
+    # so swap them back into canonical order.
+    uuidprefix = (baduuid[:8], baduuid[9:13], baduuid[14:18])
+    a = codecs.encode(struct.pack(
+        '<IHH', *[int(x, 16) for x in uuidprefix]), 'hex')
+    a = util.stringify(a)
+    uuid = (a[:8], a[8:12], a[12:16], baduuid[19:23], baduuid[24:])
+    return '-'.join(uuid).lower()
+
+
+class VmwApiClient:
+    # The methods below operate on self.wc, a pyghmi webclient session
+    # authenticated against the vCenter REST API; index_vm() resolves a
+    # confluent node name to the vCenter VM identifier used in the URLs.
+
+    def get_vm_serial(self, vm):
+        vm = self.index_vm(vm)
+        rsp = self.wc.grab_json_response_with_status(f'/api/vcenter/vm/{vm}/hardware/serial')
+        if len(rsp[0]) > 0:
+            portid = rsp[0][0]['port']
+            rsp = self.wc.grab_json_response_with_status(f'/api/vcenter/vm/{vm}/hardware/serial/{portid}')
+            if rsp[1] == 200:
+                if rsp[0]['backing']['type'] != 'NETWORK_SERVER':
+                    return
+                netloc = rsp[0]['backing']['network_location']
+                portnum = netloc.split(':')[-1]
+                tlsenabled = False
+                if netloc.startswith('telnets'):
+                    tlsenabled = True
+                hostinfo = self.get_vm_host(vm)
+                hostname = hostinfo['name']
+                return {
+                    'server': hostname,
+                    'port': portnum,
+                    'tls': tlsenabled,
+                }
+
+    def get_vm_bootdev(self, vm):
+        vm = self.index_vm(vm)
+        rsp = self.wc.grab_json_response_with_status(f'/api/vcenter/vm/{vm}/hardware/boot')
+        if rsp[0]['enter_setup_mode']:
+            return 'setup'
+        rsp = self.wc.grab_json_response_with_status(f'/api/vcenter/vm/{vm}/hardware/boot/device')
+        if rsp[0] and rsp[0][0]['type'] == 'ETHERNET':
+            return 'network'
+        return 'default'
+
+    def get_vm_power(self, vm):
+        vm = self.index_vm(vm)
+        rsp = self.wc.grab_json_response(f'/api/vcenter/vm/{vm}/power')
+        if rsp['state'] == 'POWERED_ON':
+            return 'on'
+        if rsp['state'] == 'POWERED_OFF':
+            return 'off'
+        if rsp['state'] == 'SUSPENDED':
+            return 'suspended'
+        raise Exception("Unknown response {}".format(repr(rsp)))
+
+    def set_vm_power(self, vm, state):
+        vm = self.index_vm(vm)
+        if state == 'boot':
+            current = self.get_vm_power(vm)
+            if current == 'on':
+                state = 'reset'
+            else:
+                state = 'start'
+        elif state == 'on':
+            state = 'start'
+        elif state == 'off':
+            state = 'stop'
+        rsp = self.wc.grab_json_response_with_status(
+            f'/api/vcenter/vm/{vm}/power?action={state}', method='POST')
+
+    def set_vm_bootdev(self, vm, bootdev):
+        vm = self.index_vm(vm)
+        self.wc.set_header('Content-Type', 'application/json')
+        try:
+            bootdevs = []
+            entersetup = False
+            if bootdev == 'setup':
+                entersetup = True
+            elif bootdev == 'default':
+                # In theory, we should be able to send an empty device list.
+                # However, the vmware api, counter to its documentation,
+                # seems to just ignore such a request.
+                # So instead we just go "disk first" and rely upon fast
+                # fail/retry to take us to a normal place.
+                currdisks, rcode = self.wc.grab_json_response_with_status(f'/api/vcenter/vm/{vm}/hardware/disk')
+                currdisks = [x['disk'] for x in currdisks]
+                bootdevs.append({'type': 'DISK', 'disks': currdisks})
+            elif bootdev in ('net', 'network'):
+                currnics, rcode = self.wc.grab_json_response_with_status(f'/api/vcenter/vm/{vm}/hardware/ethernet')
+                for nic in currnics:
+                    bootdevs.append({'type': 'ETHERNET', 'nic': nic['nic']})
+            payload = {'devices': bootdevs}
+            rsp = self.wc.grab_json_response_with_status(
+                f'/api/vcenter/vm/{vm}/hardware/boot/device', payload,
+                method='PUT')
+            rsp = self.wc.grab_json_response_with_status(
+                f'/api/vcenter/vm/{vm}/hardware/boot',
+                {'enter_setup_mode': entersetup}, method='PATCH')
+        finally:
+            del self.wc.stdheaders['Content-Type']
+
+
+def prep_vcsa_clients(nodes, configmanager):
+    cfginfo = configmanager.get_node_attributes(
+        nodes,
+        ['hardwaremanagement.manager', 'secret.hardwaremanagementuser',
+         'secret.hardwaremanagementpassword'], decrypt=True)
+    clientsbyvcsa = {}
+    clientsbynode = {}
+    for node in nodes:
+        cfg = cfginfo[node]
+        currvcsa = cfg['hardwaremanagement.manager']['value']
+        if currvcsa not in clientsbyvcsa:
+            user = cfg.get('secret.hardwaremanagementuser', {}).get('value', None)
+            passwd = cfg.get('secret.hardwaremanagementpassword', {}).get('value', None)
+            clientsbyvcsa[currvcsa] = VmwApiClient(currvcsa, user, passwd, configmanager)
+        clientsbynode[node] = clientsbyvcsa[currvcsa]
+    return clientsbynode
+
+
+def retrieve(nodes, element, configmanager, inputdata):
+    clientsbynode = prep_vcsa_clients(nodes, configmanager)
+    for node in nodes:
+        currclient = clientsbynode[node]
+        if element == ['power', 'state']:
+            yield msg.PowerState(node, currclient.get_vm_power(node))
+        elif element == ['boot', 'nextdevice']:
+            yield msg.BootDevice(node, currclient.get_vm_bootdev(node))
+        elif element[:2] == ['inventory', 'hardware'] and len(element) == 4:
+            for rsp in currclient.get_vm_inventory(node):
+                yield rsp
+
+
+def update(nodes, element, configmanager, inputdata):
+    clientsbynode = prep_vcsa_clients(nodes, configmanager)
+    for node in nodes:
+        currclient = clientsbynode[node]
+        if element == ['power', 'state']:
+            currclient.set_vm_power(node, inputdata.powerstate(node))
+            yield msg.PowerState(node, currclient.get_vm_power(node))
+        elif element == ['boot', 'nextdevice']:
+            currclient.set_vm_bootdev(node, inputdata.bootdevice(node))
+            yield msg.BootDevice(node, currclient.get_vm_bootdev(node))
+
+
+# Assume create() is only used for console sessions for now
+def create(nodes, element, configmanager, inputdata):
+    clientsbynode = prep_vcsa_clients(nodes, configmanager)
+    for node in nodes:
+        serialdata = clientsbynode[node].get_vm_serial(node)
+        return VmConsole(serialdata['server'], serialdata['port'], serialdata['tls'])
+
+
+if __name__ == '__main__':
+    import sys
+    import os
+    from pprint import pprint
+    myuser = os.environ['VMWUSER']
+    mypass = os.environ['VMWPASS']
+    vc = VmwApiClient(sys.argv[1], myuser, mypass, None)
+    vm = sys.argv[2]
+    if sys.argv[3] == 'setboot':
+        vc.set_vm_bootdev(vm, sys.argv[4])
+        vc.get_vm_bootdev(vm)
+    elif sys.argv[3] == 'power':
+        vc.set_vm_power(vm, sys.argv[4])
+    elif sys.argv[3] == 'getinfo':
+        pprint(vc.get_vm(vm))
+        print("Bootdev: " + vc.get_vm_bootdev(vm))
+        print("Power: " + vc.get_vm_power(vm))
+        print("Serial: " + repr(vc.get_vm_serial(vm)))
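
For anyone trying this out: the plugin reads its connection details from the standard hardware management attributes, and the module doubles as a standalone test harness. The commands below are a hypothetical sketch rather than part of the change itself; the node name (vm1), vCenter host (vcsa.example.com), and credentials are placeholders, and routing power/boot requests through this plugin still depends on the rest of the confluent plugin configuration.

    # Point a node at its vCenter and enable the new console method added above
    nodeattrib vm1 hardwaremanagement.manager=vcsa.example.com console.method=vcenter
    nodeattrib vm1 secret.hardwaremanagementuser=administrator@vsphere.local
    nodeattrib vm1 secret.hardwaremanagementpassword=<password>

    # Exercise the module directly against vCenter, bypassing confluent entirely
    VMWUSER=administrator@vsphere.local VMWPASS=<password> \
        python3 vcenter.py vcsa.example.com vm1 getinfo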