From ec890d5286c966ddd8fe48f4eedda2e20620610f Mon Sep 17 00:00:00 2001 From: ddraganov Date: Mon, 15 Jul 2024 15:03:20 +0300 Subject: [PATCH] Add vSAN samples Supported by pyVmomi 8.0.3.0+ --- vsan-samples/ConfigureHciSample.py | 361 ++++++++++++++ vsan-samples/ConfigureHciWithESASample.py | 354 +++++++++++++ vsan-samples/VsanXvcHciSample.py | 321 ++++++++++++ vsan-samples/remotevsansamples.py | 218 ++++++++ vsan-samples/vsanAlarmConfigSample.py | 159 ++++++ .../vsanIOTripAnalyzerScheduleSamples.py | 278 +++++++++++ vsan-samples/vsanSpaceReportSamples.py | 208 ++++++++ vsan-samples/vsanapisamples.py | 149 ++++++ vsan-samples/vsanclientsamples.py | 216 ++++++++ vsan-samples/vsanclustershutdownsamples.py | 195 ++++++++ vsan-samples/vsancnsfilesamples.py | 464 ++++++++++++++++++ vsan-samples/vsancnssamples.py | 210 ++++++++ .../vsandataintransitencryptionsamples.py | 140 ++++++ vsan-samples/vsandeployersamples.py | 228 +++++++++ vsan-samples/vsandirectsamples.py | 169 +++++++ vsan-samples/vsanesaconfigurationsamples.py | 126 +++++ vsan-samples/vsanesastoragepoolsamples.py | 339 +++++++++++++ vsan-samples/vsanfssamples.py | 294 +++++++++++ .../vsanhealththresholdcustomizesample.py | 165 +++++++ vsan-samples/vsaniscsisamples.py | 216 ++++++++ .../vsanresyncetaimprovementsamples.py | 163 ++++++ vsan-samples/vsansharedwitnesssample.py | 270 ++++++++++ vsan-samples/vsanvumsamples.py | 133 +++++ vsan-samples/vsanwhatifdecom30samples.py | 147 ++++++ .../whatifDecom3DiskAndDiskGroupSamples.py | 436 ++++++++++++++++ 25 files changed, 5959 insertions(+) create mode 100644 vsan-samples/ConfigureHciSample.py create mode 100644 vsan-samples/ConfigureHciWithESASample.py create mode 100644 vsan-samples/VsanXvcHciSample.py create mode 100644 vsan-samples/remotevsansamples.py create mode 100644 vsan-samples/vsanAlarmConfigSample.py create mode 100644 vsan-samples/vsanIOTripAnalyzerScheduleSamples.py create mode 100644 vsan-samples/vsanSpaceReportSamples.py create mode 100644 vsan-samples/vsanapisamples.py create mode 100644 vsan-samples/vsanclientsamples.py create mode 100644 vsan-samples/vsanclustershutdownsamples.py create mode 100644 vsan-samples/vsancnsfilesamples.py create mode 100644 vsan-samples/vsancnssamples.py create mode 100644 vsan-samples/vsandataintransitencryptionsamples.py create mode 100644 vsan-samples/vsandeployersamples.py create mode 100644 vsan-samples/vsandirectsamples.py create mode 100644 vsan-samples/vsanesaconfigurationsamples.py create mode 100644 vsan-samples/vsanesastoragepoolsamples.py create mode 100644 vsan-samples/vsanfssamples.py create mode 100644 vsan-samples/vsanhealththresholdcustomizesample.py create mode 100644 vsan-samples/vsaniscsisamples.py create mode 100644 vsan-samples/vsanresyncetaimprovementsamples.py create mode 100644 vsan-samples/vsansharedwitnesssample.py create mode 100644 vsan-samples/vsanvumsamples.py create mode 100644 vsan-samples/vsanwhatifdecom30samples.py create mode 100644 vsan-samples/whatifDecom3DiskAndDiskGroupSamples.py diff --git a/vsan-samples/ConfigureHciSample.py b/vsan-samples/ConfigureHciSample.py new file mode 100644 index 00000000..667a59f1 --- /dev/null +++ b/vsan-samples/ConfigureHciSample.py @@ -0,0 +1,361 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2021-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. 
+ +This file includes sample codes for the vSphere HCI API to +set up an entire cluster, including DRS, vSAN, HA, vDS, ESX networking +and core ESX services.. + +It takes four steps ro build a HCI Cluster: +1.create datacenter by calling API CreateDatacenter(). +2.create cluster in datacenter by calling API CreateClusterEx(). +3.add host to cluster by calling API AddHost_Task(). +4.configure HCI by enabling vSAN using API ConfigureHCI_Task(). + +The API ConfigureHCI_Task() is available since vSphere 6.7 Update 1 release. + +""" + +__author__ = 'Broadcom, Inc' +from pyVim.connect import SmartConnect, Disconnect +from pyVmomi import vim +import sys +import ssl +import atexit +import argparse +import getpass +if sys.version[0] < '3': + input = raw_input + +# Import the vSAN API python bindings and utilities. +import pyVmomi +import vsanmgmtObjects +import vsanapiutils +from subprocess import Popen, PIPE + +datacenterName = "HCI-Datacenter" +clusterName = "VSAN-Cluster" + + +class DrsInfo: + def __init__(self, + enabled=True, + vmotionRate=5, + behavior=vim.cluster.DrsConfigInfo.DrsBehavior.fullyAutomated): + self.enabled = enabled + self.vmotionRate = vmotionRate + self.behavior = behavior + + def ToDrsConfig(self): + drsConfig =vim.cluster.DrsConfigInfo() + drsConfig.enabled = self.enabled + drsConfig.defaultVmBehavior = self.behavior + drsConfig.vmotionRate = self.vmotionRate + return drsConfig + +def GetArgs(): + """ + Supports the command-line arguments listed below. + """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-i', '--vc', required=True, action='store', + help='IP of vCenter') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('-ips', '--hostIps', required=True, action='store', + help='IPs of the hosts to be added to the cluster,\ + The IPs of the hosts, splitted by commar') + parser.add_argument('-hu', '--hostUsername', required=True, action='store', + help='Username of the hosts') + parser.add_argument('-hp', '--hostPassword', required=True, action='store', + help='Password of the hosts') + args = parser.parse_args() + return args + +def getSslThumbprint(addr): + import ssl + import socket + import hashlib + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(1) + wrappedSocket = ssl.wrap_socket(sock) + thumbPrint = None + try: + wrappedSocket.connect((addr, 443)) + except: + response = False + else: + der_cert_bin = wrappedSocket.getpeercert(True) + pem_cert = ssl.DER_cert_to_PEM_cert(wrappedSocket.getpeercert(True)) + thumb_sha1 = hashlib.sha1(der_cert_bin) + thumb_sha1 = str(hashlib.sha1(der_cert_bin).hexdigest()).upper() + thumbPrint = ":".join(a+b for a,b in\ + zip(thumb_sha1[::2], thumb_sha1[1::2])) + wrappedSocket.close() + return thumbPrint + +def CreateHostConfigProfile(ntpServer, lockdownMode): + ntpServers = [ntpServer] + ntpConfig = vim.HostNtpConfig(server=ntpServers) + dateTimeConfig = vim.HostDateTimeConfig(ntpConfig=ntpConfig) + hostConfigProfile = \ + vim.ClusterComputeResource.\ + HostConfigurationProfile(dateTimeConfig=dateTimeConfig, + lockdownMode=lockdownMode) + return hostConfigProfile + +def GetVcProf(): + drsInfo = \ + DrsInfo(vmotionRate=2, + behavior=vim.cluster.DrsConfigInfo.DrsBehavior.fullyAutomated) + + vcProf = 
vim.ClusterComputeResource.VCProfile() + configSpec = vim.cluster.ConfigSpecEx() + configSpec.drsConfig = drsInfo.ToDrsConfig() + vcProf.clusterSpec = configSpec + vcProf.evcModeKey = "intel-merom" + + return vcProf + +def GetFreePnicList(host): + networkSystem = host.configManager.networkSystem + # pnic spec will have a non-NULL entry for linkSpeed if the pnic + # link-state is UP. + allUpPnics = list(map(lambda z: z.device, + filter(lambda x: x.linkSpeed is not None, networkSystem.networkInfo.pnic))) + # Iterate through all vswitches and read the uplink devices + # connected to each. + usedNicsOnVss = list(map(lambda z: z.spec.bridge.nicDevice, + filter(lambda x: x.spec.bridge is not None and + len(x.spec.bridge.nicDevice) != 0, networkSystem.networkInfo.vswitch))) + # Iterate through all vds'es and read the uplink devices connected to each. + # Firstly, obtain the list of all proxySwitches + # that have a non-empty list of uplinks. + # From this list, create a list of pnic objects. The pnic object is of type + # pyVmomi.VmomiSupport.Link[], and the first element within that list has the + # pnic name. + usedNicsOnProxy = list(map(lambda y: y[0], + map(lambda z: z.pnic, + filter(lambda x: x.pnic is not None and len(x.pnic) > 0, + host.config.network.proxySwitch)))) + + usedVssPnics = [] + if len(usedNicsOnVss) > 1: + """ + In this case, usedVnicsOnVss returns an array of type: + [(str) [ 'vmnic0' ], (str) [ 'vmnic5' ]] + To obtain the entire list of vmnics, we need to read the first + element saved in pyVmomi.VmomiSupport.str[]. + """ + usedVssPnics = list(map(lambda x: x[0], usedNicsOnVss)) + elif len(usedNicsOnVss) == 1: + """ + There's only one used vnic, e.g: + (str) [ 'vmnic0' ] + """ + usedVssPnics = list(filter(lambda x: x, usedNicsOnVss[0])) + + if usedNicsOnProxy: + # usedNicsOnProxy[0] is a Link[], each element of which looks like + # 'key-vim.host.PhysicalNic-vmnic1' + pnicsOnProxy = list(map(lambda x: str(x.split('-')[-1]), usedNicsOnProxy)) + usedVssPnics += pnicsOnProxy + + freePnics = set(allUpPnics) - set(usedVssPnics) + + if len(freePnics) >= 1: + return freePnics + return [] + +def CreateDvpgSpec(dvpgName): + dvpgSpec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() + dvpgSpec.numPorts = 128 + dvpgSpec.name = dvpgName + dvpgSpec.type = "earlyBinding" + return dvpgSpec + +def CreateDvsProfile(dvsName, pnicDevices, dvpgNameAndService, + dvsMoRef=None, dvpgMoRefAndService=None): + dvsProf = vim.ClusterComputeResource.DvsProfile() + dvsProf.pnicDevices = pnicDevices + dvsProf.dvsName = dvsName + dvsProf.dvSwitch = dvsMoRef + dvpgToServiceMappings = [] + + if dvpgNameAndService is not None: + # Populate the dvportgroup mappings with dvportgroup specs. + for dvpgName, service in dvpgNameAndService: + dvpgToServiceMapping =\ + vim.ClusterComputeResource.\ + DvsProfile.DVPortgroupSpecToServiceMapping( + dvPortgroupSpec=CreateDvpgSpec(dvpgName), + service=service) + dvpgToServiceMappings.append(dvpgToServiceMapping) + if dvpgMoRefAndService is not None: + # Populate the dvportgroup mappings with dvportgroup MoRefs. 
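+            # Sample note: a MoRef-based mapping binds the service to an
+            # already existing dvportgroup, while the spec-based mapping
+            # above lets ConfigureHCI_Task create a new dvportgroup from
+            # the given spec.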
+ for dvpgMoRef, service in dvpgMoRefAndService: + dvpgToServiceMapping =\ + vim.ClusterComputeResource.\ + DvsProfile.DVPortgroupSpecToServiceMapping( + dvPortgroup=dvpgMoRef, service=service) + dvpgToServiceMappings.append(dvpgToServiceMapping) + dvsProf.dvPortgroupMapping = dvpgToServiceMappings + return dvsProf + +def GetDvsProfiles(hosts): + dvpgNameAndService = [ + ("vmotion-dvpg", "vmotion"), + ("vsan-dvpg", "vsan")] + + dvsName = "hci-dvs-new" + freePnic = list(GetFreePnicList(hosts[0]))[0] + + # setup DVS profile + dvsProf = CreateDvsProfile(dvsName, freePnic, dvpgNameAndService) + + return [dvsProf] + +def CreateDefaultVSanSpec(vSanCfgInfo): + dedupConfig = vim.vsan.DataEfficiencyConfig(dedupEnabled=False) + encryptionConfig = \ + vim.vsan.DataEncryptionConfig(encryptionEnabled=False) + + vSanSpec = vim.vsan.ReconfigSpec( + vsanClusterConfig=vSanCfgInfo, + dataEfficiencyConfig=dedupConfig, + dataEncryptionConfig=encryptionConfig, + modify=True, + allowReducedRedundancy=False + ) + return vSanSpec + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for VC %s and ' + 'user %s: ' % (args.vc,args.user)) + if args.hostPassword: + hostPassword = args.hostPassword + else: + hostPassword = getpass.getpass(prompt='Enter password for Esxi %s and ' + 'user %s: ' % (args.vcIps,args.hostUsername)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.vc, + user=args.user, + pwd=password, + port=443, + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the vcis vCenter or ESXi. + aboutInfo = si.content.about + + if aboutInfo.apiType != 'VirtualCenter': + print("The HCI APIs are only available on vCenter") + exit(1) + + folder = si.content.rootFolder + dc = folder.CreateDatacenter(datacenterName) + print("Create datacenter %s succeeded" % datacenterName) + hostFolder = dc.hostFolder + clusterSpec = vim.ClusterConfigSpecEx( + inHciWorkflow = True) + vsanConfig = vim.vsan.cluster.ConfigInfo() + vsanConfig.enabled = True + clusterSpec.vsanConfig = vsanConfig + cluster = hostFolder.CreateClusterEx(name = clusterName, spec = clusterSpec) + print("Create cluster %s succeeded" % clusterName) + + hostIps = args.hostIps.split(',') + tasks = [] + hostSpecs = [] + hostFolder = dc.hostFolder + for hostIp in hostIps: + hostSpec = vim.Folder.NewHostSpec() + sslThumbprint = getSslThumbprint(hostIp) + hostConnSpec = vim.HostConnectSpec(hostName=hostIp, + userName=args.hostUsername, + force=True, + port=443, + password=hostPassword, + sslThumbprint=sslThumbprint, + ) + hostSpec.hostCnxSpec = hostConnSpec + hostSpecs.append(hostSpec) + task = hostFolder.BatchAddHostsToCluster_Task(cluster, + hostSpecs, + None, + None, + 'maintenance') + print("Adding host ...") + tasks.append(task) + vsanapiutils.WaitForTasks(tasks, si) + print("Configuring HCI for cluster %s ..." 
% clusterName) + hciCfgs =[] + for mo in cluster.host: + hciCfg = vim.ClusterComputeResource.HostConfigurationInput() + hciCfg.host = mo + hciCfgs.append(hciCfg) + lockdownMode = \ + vim.host.HostAccessManager.LockdownMode.lockdownDisabled + NTP_SERVER = "time-c-b.nist.gov" + hostConfigProfile = CreateHostConfigProfile(NTP_SERVER, lockdownMode) + vSanCfgInfo = vim.vsan.cluster.ConfigInfo( + enabled=True, + defaultConfig=vim.vsan.cluster.ConfigInfo.HostDefaultInfo( + autoClaimStorage=False)) + vSanSpec = CreateDefaultVSanSpec(vSanCfgInfo) + vcProf = GetVcProf() + dvsProfiles = GetDvsProfiles(cluster.host) + clusterHciSpec = vim.ClusterComputeResource.HCIConfigSpec( + hostConfigProfile=hostConfigProfile, + vSanConfigSpec=vSanSpec, + vcProf=vcProf, + dvsProf=dvsProfiles) + + task=cluster.ConfigureHCI_Task(clusterSpec = clusterHciSpec,\ + hostInputs = hciCfgs) + vsanapiutils.WaitForTasks([task], si) + print("Successfully configured HCI cluster %s" % clusterName) + + # vSAN cluster health summary can be cached at vCenter. + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.vc, port=443) + vcMos = vsanapiutils.GetVsanVcMos( + si._stub, context=context, version=apiVersion) + vhs = vcMos['vsan-cluster-health-system'] + fetchFromCache = True + fetchFromCacheAnswer = input( + 'Do you want to fetch the cluster health from cache if exists?(y/n):') + if fetchFromCacheAnswer.lower() == 'n': + fetchFromCache = False + print('Fetching cluster health from cached state: %s' % + ('Yes' if fetchFromCache else 'No')) + healthSummary = vhs.QueryClusterHealthSummary( + cluster=cluster, includeObjUuids=True, fetchFromCache=fetchFromCache) + clusterStatus = healthSummary.clusterStatus + + print("Cluster %s Status: %s" % (clusterName, clusterStatus.status)) + for hostStatus in clusterStatus.trackedHostsStatus: + print("Host %s Status: %s" % (hostStatus.hostname, hostStatus.status)) + +if __name__ == "__main__": + main() diff --git a/vsan-samples/ConfigureHciWithESASample.py b/vsan-samples/ConfigureHciWithESASample.py new file mode 100644 index 00000000..0c53ab06 --- /dev/null +++ b/vsan-samples/ConfigureHciWithESASample.py @@ -0,0 +1,354 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +This file includes sample codes for the vSphere HCI API to +set up an entire cluster with vSAN ESA, including DRS, HA, vDS, ESX networking +and core ESX services.. + +It takes four steps to build a HCI Cluster: +1.create datacenter by calling API CreateDatacenter(). +2.create cluster in datacenter by calling API CreateClusterEx(). +3.add host to cluster by calling API AddHost_Task(). +4.configure HCI by enabling vSAN ESA using API ConfigureHCI_Task(). + +""" + +__author__ = 'Broadcom, Inc' +from pyVim.connect import SmartConnect, Disconnect +from pyVmomi import vim +import sys +import ssl +import atexit +import argparse +import getpass +if sys.version[0] < '3': + input = raw_input + +# Import the vSAN API python bindings and utilities. 
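+# Note: importing vsanmgmtObjects registers the vSAN management VMODL types
+# with pyVmomi as a side effect, so it must happen before any vSAN managed
+# object is used; vsanapiutils supplies the helpers (GetVsanVcMos,
+# WaitForTasks, ...) used throughout this sample.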
+import pyVmomi +import vsanmgmtObjects +import vsanapiutils +from subprocess import Popen, PIPE + +datacenterName = "HCI-Datacenter" +clusterName = "VSAN-ESA-Cluster" + + +class DrsInfo: + def __init__(self, + enabled=True, + vmotionRate=5, + behavior=vim.cluster.DrsConfigInfo.DrsBehavior.fullyAutomated): + self.enabled = enabled + self.vmotionRate = vmotionRate + self.behavior = behavior + + def ToDrsConfig(self): + drsConfig =vim.cluster.DrsConfigInfo() + drsConfig.enabled = self.enabled + drsConfig.defaultVmBehavior = self.behavior + drsConfig.vmotionRate = self.vmotionRate + return drsConfig + +def GetArgs(): + """ + Supports the command-line arguments listed below. + """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-i', '--vc', required=True, action='store', + help='IP of vCenter') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('-ips', '--hostIps', required=True, action='store', + help='IPs of the hosts to be added to the cluster,\ + The IPs of the hosts, splitted by commar') + parser.add_argument('-hu', '--hostUsername', required=True, action='store', + help='Username of the hosts') + parser.add_argument('-hp', '--hostPassword', required=True, action='store', + help='Password of the hosts') + args = parser.parse_args() + return args + +def getSslThumbprint(addr): + import ssl + import socket + import hashlib + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(1) + wrappedSocket = ssl.wrap_socket(sock) + thumbPrint = None + try: + wrappedSocket.connect((addr, 443)) + except: + response = False + else: + der_cert_bin = wrappedSocket.getpeercert(True) + pem_cert = ssl.DER_cert_to_PEM_cert(wrappedSocket.getpeercert(True)) + thumb_sha1 = hashlib.sha1(der_cert_bin) + thumb_sha1 = str(hashlib.sha1(der_cert_bin).hexdigest()).upper() + thumbPrint = ":".join(a+b for a,b in\ + zip(thumb_sha1[::2], thumb_sha1[1::2])) + wrappedSocket.close() + return thumbPrint + +def CreateHostConfigProfile(ntpServer, lockdownMode): + ntpServers = [ntpServer] + ntpConfig = vim.HostNtpConfig(server=ntpServers) + dateTimeConfig = vim.HostDateTimeConfig(ntpConfig=ntpConfig) + hostConfigProfile = \ + vim.ClusterComputeResource.\ + HostConfigurationProfile(dateTimeConfig=dateTimeConfig, + lockdownMode=lockdownMode) + return hostConfigProfile + +def GetVcProf(): + drsInfo = \ + DrsInfo(vmotionRate=2, + behavior=vim.cluster.DrsConfigInfo.DrsBehavior.fullyAutomated) + + vcProf = vim.ClusterComputeResource.VCProfile() + configSpec = vim.cluster.ConfigSpecEx() + configSpec.drsConfig = drsInfo.ToDrsConfig() + vcProf.clusterSpec = configSpec + vcProf.evcModeKey = "intel-merom" + + return vcProf + +def GetFreePnicList(host): + networkSystem = host.configManager.networkSystem + # pnic spec will have a non-NULL entry for linkSpeed if the pnic + # link-state is UP. + allUpPnics = list(map(lambda z: z.device, + filter(lambda x: x.linkSpeed is not None, networkSystem.networkInfo.pnic))) + # Iterate through all vswitches and read the uplink devices + # connected to each. 
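+   # In short: a pnic is considered "free" when it reports a link speed
+   # (link up) and is not claimed as an uplink by any standard vSwitch or
+   # distributed (proxy) switch on this host.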
+ usedNicsOnVss = list(map(lambda z: z.spec.bridge.nicDevice, + filter(lambda x: x.spec.bridge is not None and + len(x.spec.bridge.nicDevice) != 0, networkSystem.networkInfo.vswitch))) + # Iterate through all vds'es and read the uplink devices connected to each. + # Firstly, obtain the list of all proxySwitches + # that have a non-empty list of uplinks. + # From this list, create a list of pnic objects. The pnic object is of type + # pyVmomi.VmomiSupport.Link[], and the first element within that list has the + # pnic name. + usedNicsOnProxy = list(map(lambda y: y[0], + map(lambda z: z.pnic, + filter(lambda x: x.pnic is not None and len(x.pnic) > 0, + host.config.network.proxySwitch)))) + + usedVssPnics = [] + if len(usedNicsOnVss) > 1: + """ + In this case, usedVnicsOnVss returns an array of type: + [(str) [ 'vmnic0' ], (str) [ 'vmnic5' ]] + To obtain the entire list of vmnics, we need to read the first + element saved in pyVmomi.VmomiSupport.str[]. + """ + usedVssPnics = list(map(lambda x: x[0], usedNicsOnVss)) + elif len(usedNicsOnVss) == 1: + """ + There's only one used vnic, e.g: + (str) [ 'vmnic0' ] + """ + usedVssPnics = list(filter(lambda x: x, usedNicsOnVss[0])) + + if usedNicsOnProxy: + # usedNicsOnProxy[0] is a Link[], each element of which looks like + # 'key-vim.host.PhysicalNic-vmnic1' + pnicsOnProxy = list(map(lambda x: str(x.split('-')[-1]), usedNicsOnProxy)) + usedVssPnics += pnicsOnProxy + + freePnics = set(allUpPnics) - set(usedVssPnics) + + if len(freePnics) >= 1: + return freePnics + return [] + +def CreateDvpgSpec(dvpgName): + dvpgSpec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() + dvpgSpec.numPorts = 128 + dvpgSpec.name = dvpgName + dvpgSpec.type = "earlyBinding" + return dvpgSpec + +def CreateDvsProfile(dvsName, pnicDevices, dvpgNameAndService, + dvsMoRef=None, dvpgMoRefAndService=None): + dvsProf = vim.ClusterComputeResource.DvsProfile() + dvsProf.pnicDevices = pnicDevices + dvsProf.dvsName = dvsName + dvsProf.dvSwitch = dvsMoRef + dvpgToServiceMappings = [] + + if dvpgNameAndService is not None: + # Populate the dvportgroup mappings with dvportgroup specs. + for dvpgName, service in dvpgNameAndService: + dvpgToServiceMapping =\ + vim.ClusterComputeResource.\ + DvsProfile.DVPortgroupSpecToServiceMapping( + dvPortgroupSpec=CreateDvpgSpec(dvpgName), + service=service) + dvpgToServiceMappings.append(dvpgToServiceMapping) + if dvpgMoRefAndService is not None: + # Populate the dvportgroup mappings with dvportgroup MoRefs. 
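+            # As with the spec-based mappings above, a MoRef-based mapping
+            # attaches the service to an existing dvportgroup instead of
+            # creating a new one.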
+ for dvpgMoRef, service in dvpgMoRefAndService: + dvpgToServiceMapping =\ + vim.ClusterComputeResource.\ + DvsProfile.DVPortgroupSpecToServiceMapping( + dvPortgroup=dvpgMoRef, service=service) + dvpgToServiceMappings.append(dvpgToServiceMapping) + dvsProf.dvPortgroupMapping = dvpgToServiceMappings + return dvsProf + +def GetDvsProfiles(hosts): + dvpgNameAndService = [ + ("vmotion-dvpg", "vmotion"), + ("vsan-dvpg", "vsan")] + + dvsName = "hci-dvs-new" + freePnic = list(GetFreePnicList(hosts[0]))[0] + + # setup DVS profile + dvsProf = CreateDvsProfile(dvsName, freePnic, dvpgNameAndService) + + return [dvsProf] + +def CreateDefaultVSanSpec(vSanCfgInfo): + encryptionConfig = \ + vim.vsan.DataEncryptionConfig(encryptionEnabled=False) + + vSanSpec = vim.vsan.ReconfigSpec( + vsanClusterConfig=vSanCfgInfo, + dataEncryptionConfig=encryptionConfig, + modify=True, + allowReducedRedundancy=False + ) + return vSanSpec + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for VC %s and ' + 'user %s: ' % (args.vc,args.user)) + if args.hostPassword: + hostPassword = args.hostPassword + else: + hostPassword = getpass.getpass(prompt='Enter password for Esxi %s and ' + 'user %s: ' % (args.vcIps,args.hostUsername)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.vc, + user=args.user, + pwd=password, + port=443, + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the vcis vCenter or ESXi. + aboutInfo = si.content.about + + if aboutInfo.apiType != 'VirtualCenter': + print("The HCI APIs are only available on vCenter") + exit(1) + + folder = si.content.rootFolder + dc = folder.CreateDatacenter(datacenterName) + print("Create datacenter %s succeeded" % datacenterName) + hostFolder = dc.hostFolder + clusterSpec = vim.ClusterConfigSpecEx( + inHciWorkflow = True) + vsanConfig = vim.vsan.cluster.ConfigInfo(enabled=True, vsanEsaEnabled=True) + clusterSpec.vsanConfig = vsanConfig + cluster = hostFolder.CreateClusterEx(name = clusterName, spec = clusterSpec) + print("Create cluster %s succeeded" % clusterName) + + hostIps = args.hostIps.split(',') + tasks = [] + hostSpecs = [] + hostFolder = dc.hostFolder + for hostIp in hostIps: + hostSpec = vim.Folder.NewHostSpec() + sslThumbprint = getSslThumbprint(hostIp) + hostConnSpec = vim.HostConnectSpec(hostName=hostIp, + userName=args.hostUsername, + force=True, + port=443, + password=hostPassword, + sslThumbprint=sslThumbprint, + ) + hostSpec.hostCnxSpec = hostConnSpec + hostSpecs.append(hostSpec) + task = hostFolder.BatchAddHostsToCluster_Task(cluster, + hostSpecs, + None, + None, + 'maintenance') + print("Adding host ...") + tasks.append(task) + vsanapiutils.WaitForTasks(tasks, si) + print("Configuring HCI for cluster %s ..." 
% clusterName) + hciCfgs =[] + for mo in cluster.host: + hciCfg = vim.ClusterComputeResource.HostConfigurationInput() + hciCfg.host = mo + hciCfgs.append(hciCfg) + lockdownMode = \ + vim.host.HostAccessManager.LockdownMode.lockdownDisabled + NTP_SERVER = "time-c-b.nist.gov" + hostConfigProfile = CreateHostConfigProfile(NTP_SERVER, lockdownMode) + vSanCfgInfo = vim.vsan.cluster.ConfigInfo( + enabled=True, vsanEsaEnabled=True) + vSanSpec = CreateDefaultVSanSpec(vSanCfgInfo) + vcProf = GetVcProf() + dvsProfiles = GetDvsProfiles(cluster.host) + clusterHciSpec = vim.ClusterComputeResource.HCIConfigSpec( + hostConfigProfile=hostConfigProfile, + vSanConfigSpec=vSanSpec, + vcProf=vcProf, + dvsProf=dvsProfiles) + + task=cluster.ConfigureHCI_Task(clusterSpec = clusterHciSpec,\ + hostInputs = hciCfgs) + vsanapiutils.WaitForTasks([task], si) + print("Successfully configured HCI cluster %s" % clusterName) + + # vSAN cluster health summary can be cached at vCenter. + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.vc, port=443) + vcMos = vsanapiutils.GetVsanVcMos( + si._stub, context=context, version=apiVersion) + vhs = vcMos['vsan-cluster-health-system'] + fetchFromCache = True + fetchFromCacheAnswer = input( + 'Do you want to fetch the cluster health from cache if exists?(y/n):') + if fetchFromCacheAnswer.lower() == 'n': + fetchFromCache = False + print('Fetching cluster health from cached state: %s' % + ('Yes' if fetchFromCache else 'No')) + healthSummary = vhs.QueryClusterHealthSummary( + cluster=cluster, includeObjUuids=True, fetchFromCache=fetchFromCache) + clusterStatus = healthSummary.clusterStatus + + print("Cluster %s Status: %s" % (clusterName, clusterStatus.status)) + for hostStatus in clusterStatus.trackedHostsStatus: + print("Host %s Status: %s" % (hostStatus.hostname, hostStatus.status)) + +if __name__ == "__main__": + main() diff --git a/vsan-samples/VsanXvcHciSample.py b/vsan-samples/VsanXvcHciSample.py new file mode 100644 index 00000000..a8875537 --- /dev/null +++ b/vsan-samples/VsanXvcHciSample.py @@ -0,0 +1,321 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2022-2024 Broadcom. All Rights Reserved. +Broadcom Confidential. The term "Broadcom" refers to Broadcom Inc. + +This file includes sample codes for the xvc HCI API: +1. PrecheckDatastoreSource +2. CreateDatastoreSource +3. DestroyDatastoreSource +4. QueryHciMeshDatastores +5. mount the cluster to a remote server cluster +6. unmount the cluster from a remote server cluster + +""" + +__author__ = 'Broadcom, Inc' +import sys +from pyVim.connect import SmartConnect, Disconnect +from pyVmomi import vim, VmomiSupport +import ssl +import atexit +import argparse +import getpass +if sys.version[0] < '3': + input = raw_input + +# Import the vSAN API python bindings and utilities. +import pyVmomi +import vsanmgmtObjects +import vsanapiutils + +def GetArgs(): + """ + Supports the command-line arguments listed below. 
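+
+    Example invocation (illustrative values only):
+      VsanXvcHciSample.py -cip 10.0.0.1 -cc ClientCluster -cdc ClientDC
+        -sip 10.0.0.2 -sc ServerCluster -sdc ServerDC -sds vsanDatastore
+        -u administrator@vsphere.local -p <password>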
+ """ + parser = argparse.ArgumentParser( + description='Process args for xvc hcimesh') + parser.add_argument('-cip', '--clientIp', required=True, action='store', + help='IP of the client.') + parser.add_argument('-cc', '--clientCluster', required=True, action='store', + help='Name of the client cluster.') + parser.add_argument('-cdc', '--clientDatacenter', required=True, + action='store', + help='Name of the client Datacenter.') + parser.add_argument('-sip', '--serverIp', required=True, action='store', + help='IP of the server to be mounted.') + parser.add_argument('-sc', '--serverCluster', required=True, action='store', + help='Name of the server cluster to be mounted.') + parser.add_argument('-sdc', '--serverDatacenter', required=True, + action='store', + help='Name of the server datacenter to be mounted.') + parser.add_argument('-sds', '--serverDatastore', required=True, + action='store', + help='Name of the server datastore to be mounted.') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=True, action='store', + help='Password to use when connecting to host') + args = parser.parse_args() + return args + +def GetSiAndMos(args, context, host, port=443): + si = SmartConnect(host=host, + user=args.user, + pwd=args.password, + port=port, + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the server host is vCenter or ESXi. + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(host, port) + if aboutInfo.apiType != 'VirtualCenter': + print("The XVC HCI APIs are only available on vCenter") + exit(1) + + # Get vSAN remote datastore system from the vCenter Managed + # Object references. 
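+    # GetVsanVcMos returns a dictionary of vSAN managed objects keyed by
+    # managed object id, e.g. 'vsan-remote-datastore-system' and
+    # 'vsan-cluster-config-system', bound to the negotiated API version.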
+ vcMos = vsanapiutils.GetVsanVcMos( + si._stub, context=context, version=apiVersion) + return si, vcMos + +def getClusterInstance(clusterName, datacenterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + if datacenterName == datacenter.name: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +def getClusterInstanceHelper(clusterName, datacenterName, si, host): + if clusterName: + clusterInstance = getClusterInstance(clusterName, datacenterName, si) + if clusterInstance is None: + print("Cluster %s is not found for %s" % (clusterName, host)) + return None + else: + print('Server or Client cluster name argument is not provided') + return None + return clusterInstance + +def getDatastoreInstance(cluster, datastoreName): + for ds in cluster.datastore: + if ds.summary.type == 'vsan' and \ + ds.summary.name == datastoreName: + return ds + print("The datastore %s couldn't be found for cluster %s" % \ + (datastoreName, cluster.name)) + return None + +def VerifyPrecheckResult(result, checkTargetName): + abnormalItems = [] + for precheckItem in result.result: + if precheckItem.status != "green": + abnormalItems.append(precheckItem) + if abnormalItems: + print('Abnormal result found prechecking %s: %s' % abnormalItems) + return -1 + +# Query remote vCenter information BEFORE adding it as a Datastore Source +def QueryHciMeshDs(args, clientSi, vcMos): + # QueryHciMeshDatastores API + vrds = vcMos['vsan-remote-datastore-system'] + cert = None + # Invoke PrecheckDatastoreSource without specify a valid cert + try: + vrds.CreateDatastoreSource(vim.vsan.HciMeshDatastoreSource( + vcInfo=vim.vsan.RemoteVcInfoStandalone( + linkType='standalone', + vcHost=args.serverIp, + user=args.user, + password=args.password))) + except vim.fault.VsanSslVerifyCertFault as e: + cert = e.cert + print('CreateDatastoreSource for %s: Got SSL verify fault: %s' % + (args.serverIp, cert)) + + querySpecs = [vim.vsan.XvcQuerySpec(objectModel='datastore'), + vim.vsan.XvcQuerySpec(objectModel='providerVcenter'), + vim.vsan.XvcQuerySpec(objectModel='clientVcenter'), + vim.vsan.XvcQuerySpec(objectModel='clientCluster')] + remoteVcInfos = [vim.vsan.RemoteVcInfoStandalone( + linkType='standalone', + vcHost=args.serverIp, + user=args.user, + password=args.password, + cert=cert)] + results = vrds.QueryHciMeshDatastores(querySpecs, remoteVcInfos) + print('Query everything of %s before create Datastore Source: %s' % + (args.serverIp, results)) + +def PrecheckAndCreateDatastoreSource(args, clientSi, vcMos): + vrds = vcMos['vsan-remote-datastore-system'] + cert = None + + # Invoke PrecheckDatastoreSource without specify a valid cert + try: + vrds.PrecheckDatastoreSource(vim.vsan.HciMeshDatastoreSource( + vcInfo=vim.vsan.RemoteVcInfoStandalone( + linkType='standalone', + vcHost=args.serverIp, + user=args.user, + password=args.password))) + except vim.fault.VsanSslVerifyCertFault as e: + cert = e.cert + print( + 'PrecheckDatastoreSource for %s: Got SSL verify fault: %s' % + (args.serverIp, cert)) + + # Invoke CreateDatastoreSource without specify a valid cert + try: + vrds.CreateDatastoreSource(vim.vsan.HciMeshDatastoreSource( + vcInfo=vim.vsan.RemoteVcInfoStandalone( + linkType='standalone', + vcHost=args.serverIp, + user=args.user, + password=args.password))) + except vim.fault.VsanSslVerifyCertFault as e: + cert = e.cert + print( + 
'CreateDatastoreSource for %s: Got SSL verify fault: %s' % + (args.serverIp, cert)) + + # call PrecheckDatastoreSource with a valid cert + results=vrds.PrecheckDatastoreSource(vim.vsan.HciMeshDatastoreSource( + vcInfo=vim.vsan.RemoteVcInfoStandalone( + linkType='standalone', vcHost=args.serverIp, + user=args.user, password=args.password, + cert=cert)), operation="checkCreateDs") + print('PrecheckDatastoreSource for %s: %s' % + (args.serverIp, results)) + VerifyPrecheckResult(results, args.serverIp) + + # call CreateDatastoreSource with a valid cert + task = vrds.CreateDatastoreSource(vim.vsan.HciMeshDatastoreSource( + vcInfo=vim.vsan.RemoteVcInfoStandalone( + linkType='standalone', vcHost=args.serverIp, + user=args.user, + password=args.password, + cert=cert))) + task = vim.Task(task._moId, clientSi._stub) + vsanapiutils.WaitForTasks([task], clientSi) + if task.info.state != 'success': + print('Failed to create datastore source with error: %s' + % task.info.error) + return -1 + print('Successfully create datastore source.') + +def QueryDs(args, vcMos): + vrds = vcMos['vsan-remote-datastore-system'] + results = vrds.QueryDatastoreSource() + print('QueryDatastoreSource: %s' % results) + assert(len(results) == 1 and + results[0].vcInfo.vcHost == args.serverIp) + +def MountUnmountCluster(args, clientSi, clientMos, clientCluster, serverDs): + vccs = clientMos['vsan-cluster-config-system'] + csConfig = vccs.GetConfigInfoEx(clientCluster) + xvcDatastores = getattr(csConfig.xvcDatastoreConfig, 'xvcDatastores', + None) + if not xvcDatastores: + xvcDatastores = [] + + # The newly mounting remote vCenter datastore + xvcDatastores.append(vim.vsan.XVCDatastoreInfo( + datastore = serverDs, + ownerVc = args.serverIp)) + + # Mount a remote vCenter datastore + xvcDatastoreConfig = vim.vsan.XVCDatastoreConfig( + xvcDatastores = xvcDatastores) + spec = vim.vsan.ReconfigSpec(xvcDatastoreConfig=xvcDatastoreConfig) + vsanTask = vccs.ReconfigureEx(clientCluster, spec) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, clientSi._stub) + vsanapiutils.WaitForTasks([vcTask], clientSi) + if vcTask.info.state != 'success': + print('Failed to mount remote datastore with error: %s' + % vcTask.info.error) + return -1 + print('Successfully mounted remote vSAN datastore %s on cluster %s' + % (serverDs.name, clientCluster.name)) + + # Test unmount + xvcDatastoreConfig = vim.vsan.XVCDatastoreConfig( + xvcDatastores = []) + spec = vim.vsan.ReconfigSpec(xvcDatastoreConfig=xvcDatastoreConfig) + vsanTask = vccs.ReconfigureEx(clientCluster, spec) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, clientSi._stub) + vsanapiutils.WaitForTasks([vcTask], clientSi) + if vcTask.info.state != 'success': + print('Failed to unmount remote datastore with error: %s' + % vcTask.info.error) + return -1 + print('Successfully unmounted remote vSAN datastore %s on cluster %s' + % (serverDs.name, clientCluster.name)) + +def DestroyDs(args, clientSi, clientMos): + vrds = clientMos['vsan-remote-datastore-system'] + # Test DestoryDatastoreSource + results=vrds.PrecheckDatastoreSource(vim.vsan.HciMeshDatastoreSource( + vcInfo=vim.vsan.RemoteVcInfoStandalone( + linkType='standalone', vcHost=args.serverIp, + user=args.user,password=args.password)), + operation="checkDestroyDs") + print('PrecheckDatastoreSource for %s: %s' % + (args.serverIp, results)) + VerifyPrecheckResult(results, args.serverIp) + + task = vrds.DestroyDatastoreSource( + vim.vsan.HciMeshDatastoreSource( + vcInfo=vim.vsan.RemoteVcInfoStandalone( + linkType='standalone', 
vcHost=args.serverIp, + user=args.user,password=args.password))) + task = vim.Task(task._moId, clientSi._stub) + vsanapiutils.WaitForTasks([task], clientSi) + if task.info.state != 'success': + print('Failed to destory datastore source with error: %s' + % task.info.error) + return -1 + print('Successfully destory datastore source') + +def main(): + args = GetArgs() + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + # Get client and server cluster and datastore instances + clientSi, clientMos = GetSiAndMos(args, context, args.clientIp) + clientCluster = getClusterInstanceHelper(args.clientCluster, + args.clientDatacenter, + clientSi, args.clientIp) + + serverSi, serverMos = GetSiAndMos(args, context, args.serverIp) + serverCluster = getClusterInstanceHelper(args.serverCluster, + args.serverDatacenter, + serverSi, args.serverIp) + serverDs = getDatastoreInstance(serverCluster, args.serverDatastore) + if serverDs is None: + return -1 + + QueryHciMeshDs(args, clientSi, clientMos) + PrecheckAndCreateDatastoreSource(args, clientSi, clientMos) + QueryDs(args, clientMos) + MountUnmountCluster(args, clientSi, clientMos, clientCluster, serverDs) + DestroyDs(args, clientSi, clientMos) + +if __name__ == "__main__": + main() diff --git a/vsan-samples/remotevsansamples.py b/vsan-samples/remotevsansamples.py new file mode 100644 index 00000000..5f6a2f6b --- /dev/null +++ b/vsan-samples/remotevsansamples.py @@ -0,0 +1,218 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2016-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +This file includes sample code for vCenter vSAN API accessing. + +To provide an example of vCenter side vSAN API access, it shows how to run +Mount Precheck, Mount and Unmount a remote vSAN datastore using +VsanRemoteDatastoreSystem MO. + +""" + +__author__ = 'Broadcom, Inc' + +from pyVim.connect import SmartConnect, Disconnect +from pyVmomi import vim +import sys +import ssl +import atexit +import argparse +import getpass +if sys.version[0] < '3': + input = raw_input + +# Import the vSAN API python bindings and utilities. +import pyVmomi +import vsanmgmtObjects +import vsanapiutils + +def GetArgs(): + """ + Supports the command-line arguments listed below. 
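+
+    Example invocation (illustrative values only):
+      remotevsansamples.py -s vc.example.com -u administrator@vsphere.local
+        --servercluster ServerCluster --clientcluster ClientCluster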
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--servercluster', dest='serverClusterName', + metavar="CLUSTER", default='serverVsanDatastore') + parser.add_argument('--clientcluster', dest='clientClusterName', + metavar="CLUSTER", default='clientVsanDatastore') + args = parser.parse_args() + return args + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +def GetClusterUuid(cluster): + if cluster.configurationEx.vsanConfigInfo.enabled == False: + print('Cluster is not vSAN enabled') + return -1 + return cluster.configurationEx.vsanConfigInfo.defaultConfig.uuid + +# Helper function to convert Cid to VsanUuid format +def ConvertCidToVsanUuid(cid): + return "%s-%s-%s-%s-%s" % (cid[0:8], # nnnnnnnn + cid[8:12], # nnnn + cid[12:16], # nnnn + cid[17:21], # nnnn + cid[21:33]) # nnnnnnnnnnnn + +def getLocalVsanDatastore(cluster): + """ + Get local vsan datastore with cluster instance + @param cluster Given cluster reference + @return localDs Local Vsan datastore + """ + # Get all cluster's vsan datastores (local + remote) + # If cluster uuid matches, it's a local datastore + clusterUuid = GetClusterUuid(cluster) + localDs = [ds for ds in cluster.datastore if ds.summary.type == 'vsan' \ + and ConvertCidToVsanUuid(ds.info.containerId) == clusterUuid] + return localDs + +def verifyPrecheckFailedResult(result): + """ + For checking the MountPrecheck failed result in detail + E.g. Some connectivity issue in a cluster Like, cluster partition, etc. 
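+    Each precheck item reports one of three status colors: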
+ Red: Indicates severe warnings + Yellow: Indicates light warnings + Green: Indicates no warnings + """ + status = True + for precheckItem in result.result: + if precheckItem.status == "red": + print('Precheck Item failed: %s' % precheckItem.type) + print(precheckItem.reason) + status = False + return status + +def getClusterInstanceHelper(clusterName, si, host): + if clusterName: + clusterInstance = getClusterInstance(clusterName, si) + if clusterInstance is None: + print("Cluster %s is not found for %s" % (clusterName, host)) + return None + else: + print('Server or Client cluster name argument is not provided') + return None + return clusterInstance + +def mountUnmountDatastore(si, vsccs, cluster, dsList, vsanConfig, dsConfig): + spec = vim.vsan.ReconfigSpec(vsanClusterConfig=vsanConfig, + datastoreConfig=dsConfig) + vsanTask = vsccs.ReconfigureEx(cluster, spec) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub) + vsanapiutils.WaitForTasks([vcTask], si) + if vcTask.info.state != 'success': + print('Failed to (un)mount remote datastore with error: %s' + % vcTask.info.error) + return -1 + print('Successfully (un)mounted remote vSAN datastore %s on cluster %s' + % (dsList[0].name, cluster.name)) + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the host is vCenter or ESXi. + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + if aboutInfo.apiType == 'VirtualCenter': + + # Get vSAN remote datastore system from the vCenter Managed + # Object references. + vcMos = vsanapiutils.GetVsanVcMos( + si._stub, context=context, version=apiVersion) + vrds = vcMos['vsan-remote-datastore-system'] + + # Get client and server cluster instances + serverCluster = getClusterInstanceHelper(args.serverClusterName, + si, args.host) + clientCluster = getClusterInstanceHelper(args.clientClusterName, + si, args.host) + if serverCluster is None or clientCluster is None: + return -1 + + """ + Mount/Unmount work with desired state mechanism. Spec needs to contain the + list of existing remote datastores. For a given spec: + Mount: Provided remote vSAN datastore(s) will be mounted to target vSAN + cluster, skip if already mounted + Unmount: All in use remote vSAN datastores of target vSAN cluster will be + unmounted if not specified in desired spec. 
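+
+    In other words, remoteDatastores in AdvancedDatastoreConfig is a
+    declarative list: it must always name the complete set of remote
+    datastores that should remain mounted after the reconfiguration.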
+ + """ + # Get local vSAN datastore from the server cluster + localDatastore = getLocalVsanDatastore(serverCluster) + + # Run MountPrecheck API and verify the result for failures + if len(localDatastore) > 0: + print('Running MountPrecheck on cluster: %s' % clientCluster.name) + result = vrds.MountPrecheck(clientCluster, localDatastore[0]) + if verifyPrecheckFailedResult(result): + + vsccs = vcMos['vsan-cluster-config-system'] + vsanConfig = vim.vsan.cluster.ConfigInfo(enabled=None) + + # Mounting a remote datastore + print('Mounting remote datastore on cluster: %s' + % clientCluster.name) + dsConfig = vim.vsan.AdvancedDatastoreConfig( + remoteDatastores=localDatastore) + mountUnmountDatastore(si, vsccs, clientCluster, localDatastore, + vsanConfig, dsConfig) + + # Unmounting a remote datastore + print('Unmounting remote datastore from cluster: %s' + % clientCluster.name) + dsConfig = vim.vsan.AdvancedDatastoreConfig(remoteDatastores=[]) + mountUnmountDatastore(si, vsccs, clientCluster, localDatastore, + vsanConfig, dsConfig) + else: + print('Error: No local vSAN datastore found for server cluster %s' + % serverCluster.name) + else: + print('Host provided should be a Virtual Center') + return -1 + +if __name__ == "__main__": + main() diff --git a/vsan-samples/vsanAlarmConfigSample.py b/vsan-samples/vsanAlarmConfigSample.py new file mode 100644 index 00000000..bf0f7a14 --- /dev/null +++ b/vsan-samples/vsanAlarmConfigSample.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2023-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +Starting with 8.0 Update 3, we offer users the capability to configure +SSD endurance alarms for vSAN ESA clusters. + +It provides the example of simplifing the process of setting up alarms +by allowing users to configure alarms through the vSAN SDK, eliminating +the need to add alarm rules individually. + +""" + +__author__ = 'Broadcom, Inc' + +from pyVim.connect import SmartConnect, Disconnect +from pyVmomi import vim, VmomiSupport +import sys +import ssl +import atexit +import argparse +import getpass +if sys.version[0] < '3': + input = raw_input + +# Import the vSAN API python bindings and utilities. +import pyVmomi +import vsanmgmtObjects +import vsanapiutils + +TARGET_ALARM = 'alarm.esx.problem.vsan.health.ssd.endurance' + +def GetArgs(): + """ + Supports the command-line arguments listed below. + """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + args = parser.parse_args() + return args + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. 
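+    # Note: disabling hostname checking and certificate verification is only
+    # acceptable for lab setups with self-signed certificates; production
+    # code should keep TLS verification enabled.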
+ context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the host is vCenter or ESXi. + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + if aboutInfo.apiType != 'VirtualCenter': + print("The type of target is not vCenter...") + return -1 + + alarmManager = si.content.alarmManager + alarms = alarmManager.GetAlarm(si.content.rootFolder) + targetAlarm = None + for alarm in alarms: + if alarm.info.systemName == TARGET_ALARM: + targetAlarm = alarm + break + if not targetAlarm: + print('Alarm: %s is not found' % TARGET_ALARM) + return -1 + + expressions = [] + # Alarm rule for cluster name not found + expressions.append( + vim.alarm.EventAlarmExpression( + eventType = vim.event.EventEx, + eventTypeId = 'vsan.health.test.ssdendurance.clusternotfound.event', + objectType = vim.HostSystem, + comparisons = [], + status = 'yellow' + ) + ) + + # Alarm rules for disk percentage threshold configuration + comparisons = [ + vim.EventAlarmExpressionComparison( + attributeName = 'Disk Percentage Threshold', + value = '95', + operator = 'equals', + ), vim.EventAlarmExpressionComparison( + attributeName = 'Cluster Name', + # Update cluster name here to existing cluster name in the inventory + value = 'vSAN-ESA-Cluster', + operator = 'equals', + ), vim.EventAlarmExpressionComparison( + attributeName = 'host.name', + # Update host name here to existing cluster name in the inventory + value = '10.1.2.3', + operator = 'equals', + ), vim.EventAlarmExpressionComparison( + attributeName = 'Disk Name', + # Update disk and operator name here according to target host + value = 't10.NVMe', + operator = 'startsWith', + ) + ] + + expressions.append( + vim.alarm.EventAlarmExpression( + eventType = vim.event.EventEx, + eventTypeId = 'esx.problem.vsan.health.ssd.endurance', + objectType = vim.HostSystem, + comparisons = comparisons, + status = 'red' + ) + ) + + # Reconfigure alarm + info = targetAlarm.info + spec = vim.alarm.AlarmSpec( + action=info.action, + name=info.name, + systemName=info.systemName, + actionFrequency = info.actionFrequency, + description=info.description, + enabled=info.enabled, + expression=vim.alarm.OrAlarmExpression(expression=expressions), + setting=info.setting, + ) + targetAlarm.ReconfigureAlarm(spec) + + print('Alarm reconfiguration is completed') + +if __name__ == "__main__": + main() diff --git a/vsan-samples/vsanIOTripAnalyzerScheduleSamples.py b/vsan-samples/vsanIOTripAnalyzerScheduleSamples.py new file mode 100644 index 00000000..f9608f8c --- /dev/null +++ b/vsan-samples/vsanIOTripAnalyzerScheduleSamples.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2022-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +This file includes sample code for IO trip analyzer schedules configuration. + +To provide an example of IO trip analyzer recurrence configuration, it shows how +to get a cluster's IO trip analyzer scheduler configuration, and how to create, +edit or delete an IO trip analyzer scheduler recurrence. 
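+
+Example invocation (illustrative values only):
+  vsanIOTripAnalyzerScheduleSamples.py create -s vc.example.com
+    -u administrator@vsphere.local --cluster VSAN-Cluster --vm vm1
+    --name vm1-daily --startTime "2024-08-01 10:00" --duration 300
+    --interval 86400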
+ +""" + +__author__ = 'Broadcom, Inc' + +import argparse +import atexit +import time +import datetime +import getpass +import ssl +import sys +import vsanapiutils +from pyVmomi import vim +from pyVim.connect import SmartConnect, Disconnect + + +TIME_STRING_FORMAT = 'YYYY-MM-DD HH:MM' +TIME_FORMAT = '%Y-%m-%d %H:%M' + + +def validTime(timeStr): + try: + timestamp = time.mktime(time.strptime(timeStr, TIME_FORMAT)) + return datetime.datetime.utcfromtimestamp(timestamp) + except ValueError: + msg = "not a valid time: " + timeStr + raise argparse.ArgumentTypeError(msg) + + +def addArgumentsOfRecurrence(parser, isCreate): + # name + parser.add_argument( + '--name', required=not isCreate, action='store', metavar='recurrenceName', + help="The unique name for this recurrence setting.") + # target vm + parser.add_argument( + '--vm', dest='vmName', metavar='VM', required=isCreate, + help='Name of the target virtual machine to run IO trip analyzer' + ' diagnostics.') + # startTime + parser.add_argument( + '--startTime', required=isCreate, action='store', type=validTime, + help='The start time for the recurrence. Format: %s' % TIME_STRING_FORMAT) + # endTime + parser.add_argument( + '--endTime', required=False, action='store', type=validTime, + help='The end time for the recurrence. If not set, the recurrence will' + ' not end. Format: %s' % TIME_STRING_FORMAT) + # duration + parser.add_argument( + '--duration', required=isCreate, type=int, action='store', + help='The diagnostic duration for each IO trip analyzer occurence. The' + ' unit is second.') + # interval + parser.add_argument( + '--interval', required=isCreate, type=int, action='store', + help='The time interval between two IO trip analyzer tasks. If the value' + ' is set to 0, it means it is a one-time scheduling. Unit is second.') + # status + parser.add_argument( + '--status', required=False, action='store', + choices=[vim.vsan.VsanIOTripAnalyzerRecurrenceStatus.recurrenceEnabled, + vim.vsan.VsanIOTripAnalyzerRecurrenceStatus.recurrenceDisabled], + help='The recurrence status.') + + +def getArgs(): + """ + Supports the command-line arguments listed below. 
+ """ + commonArgsParser = argparse.ArgumentParser( + description='Args for connecting to vCenter server and the cluster', + add_help=False) + commonArgsParser.add_argument('-s', '--vc', required=True, action='store', + help='Remote vCenter Server to connect to') + commonArgsParser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + commonArgsParser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to vCenter Server') + commonArgsParser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to vCenter Server') + commonArgsParser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + subParsers = parser.add_subparsers(dest='action', title='subcommands') + + # arguments for creating an recurrence + parserCreateRecur = subParsers.add_parser( + 'create', help='create an IO trip analyzer recurrence', + parents=[commonArgsParser]) + addArgumentsOfRecurrence(parserCreateRecur, isCreate=True) + + # arguments for editing an recurrence + parserEditRecur = subParsers.add_parser( + 'edit', help='edit an existing IO trip analyzer recurrence', + parents=[commonArgsParser]) + addArgumentsOfRecurrence(parserEditRecur, isCreate=False) + + # arguments for removing an recurrence + parserRemoveRecur = subParsers.add_parser( + 'remove', help='remove an existing IO trip analyzer recurrence', + parents=[commonArgsParser]) + parserRemoveRecur.add_argument( + '--name', required=True, action='store', metavar='recurrenceName', + help="Name of the recurrence to be removed") + + # arguments for getting cluster's recurrences + parserGetRecur = subParsers.add_parser( + 'get', help="get the cluster's IO trip analyzer recurrences", + parents=[commonArgsParser]) + + args = parser.parse_args() + return args + + +def connectToServers(args): + """ + """ + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for vc %s and ' + 'user %s: ' % (args.vc, args.user)) + + # For python 2.7.9 and later, the default SSL context has stricter + # connection handshaking rule, hence we are turning off the hostname checking + # and client side cert verification. + sslContext = None + if sys.version_info[:3] > (2, 7, 8): + sslContext = ssl.create_default_context() + sslContext.check_hostname = False + sslContext.verify_mode = ssl.CERT_NONE + + # Connect to vCenter, get vc service instance + si = SmartConnect(host=args.vc, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=sslContext) + atexit.register(Disconnect, si) + + aboutInfo = si.content.about + if aboutInfo.apiType != 'VirtualCenter': + raise Exception("The sample script should be run against vc.") + + # Get vSAN diagnostics system from the vCenter Managed Object references. 
+    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.vc, int(args.port))
+    vsanVcMos = vsanapiutils.GetVsanVcMos(si._stub,
+                                          context=sslContext,
+                                          version=apiVersion)
+    cds = vsanVcMos['vsan-cluster-diagnostics-system']
+
+    return (si, cds)
+
+
+def getClusterOrVMInstance(content, entityName, isCluster):
+    searchIndex = content.searchIndex
+    datacenters = content.rootFolder.childEntity
+    for datacenter in datacenters:
+        if isCluster:
+            folder = datacenter.hostFolder
+        else:
+            folder = datacenter.vmFolder
+        instance = getManagedEntityInstance(searchIndex, folder, entityName)
+        if instance is not None:
+            return instance
+    return None
+
+
+def getManagedEntityInstance(searchIndex, folder, entityName):
+    # searches the immediate children of folder
+    instance = searchIndex.FindChild(folder, entityName)
+    if instance is not None:
+        return instance
+    # searches the child folders
+    for child in folder.childEntity:
+        if isinstance(child, vim.Folder):
+            instance = getManagedEntityInstance(searchIndex, child, entityName)
+            if instance is not None:
+                return instance
+    return None
+
+
+def createRecurrence(content, cds, cluster, args):
+    vm = getClusterOrVMInstance(content, args.vmName, isCluster=False)
+    if vm is None:
+        raise Exception("VM %s is not found for %s" % (args.vmName, args.vc))
+    target = vim.vsan.IODiagnosticsTarget(
+        type=vim.vsan.IODiagnosticsTargetType.VirtualMachine,
+        entityId=vm._moId)
+    if args.status is None:
+        args.status = \
+            vim.vsan.VsanIOTripAnalyzerRecurrenceStatus.recurrenceEnabled
+    # note: startTime and endTime should be a utc datetime
+    spec = vim.vsan.VsanIOTripAnalyzerRecurrence(name=args.name,
+                                                 targets=[target],
+                                                 startTime=args.startTime,
+                                                 endTime=args.endTime,
+                                                 duration=args.duration,
+                                                 interval=args.interval,
+                                                 status=args.status)
+    recurs = cds.CreateIOTripAnalyzerRecurrences(cluster, recurrences=[spec])
+    print("Recurrence %s has been created successfully!" % recurs[0].name)
+    print("The detail of the recurrence is: %s" % recurs[0])
+
+
+def editRecurrence(content, cds, cluster, args):
+    existingSpec = None
+    config = cds.GetIOTripAnalyzerSchedulerConfig(cluster)
+    for recurrence in config.recurrences:
+        if recurrence.name == args.name:
+            existingSpec = recurrence
+            break
+    if existingSpec is None:
+        raise Exception("Recurrence %s does not exist" % args.name)
+    # get vm instance
+    target = None
+    if args.vmName:
+        vm = getClusterOrVMInstance(content, args.vmName, isCluster=False)
+        if vm is None:
+            raise Exception("VM %s is not found for %s" % (args.vmName, args.vc))
+        target = vim.vsan.IODiagnosticsTarget(
+            type=vim.vsan.IODiagnosticsTargetType.VirtualMachine,
+            entityId=vm._moId)
+    spec = vim.vsan.VsanIOTripAnalyzerRecurrence(
+        name=existingSpec.name,
+        targets=[target] if target is not None else existingSpec.targets,
+        startTime=args.startTime if args.startTime else existingSpec.startTime,
+        endTime=args.endTime if args.endTime else existingSpec.endTime,
+        duration=args.duration if args.duration else existingSpec.duration,
+        interval=args.interval if args.interval else existingSpec.interval,
+        status=args.status if args.status else existingSpec.status)
+    recurs = cds.EditIOTripAnalyzerRecurrences(cluster, recurrences=[spec])
+    print("Recurrence %s has been updated successfully!"
+          % args.name)
+    print("The detail of the recurrence is: %s" % recurs[0])
+
+
+def main():
+    args = getArgs()
+    (si, cds) = connectToServers(args)
+
+    # get cluster instance
+    content = si.RetrieveContent()
+    cluster = getClusterOrVMInstance(content, args.clusterName, isCluster=True)
+    if cluster is None:
+        print("Cluster %s is not found for %s" % (args.clusterName, args.vc))
+        return -1
+
+    if args.action == 'create':
+        createRecurrence(content, cds, cluster, args)
+    elif args.action == 'edit':
+        editRecurrence(content, cds, cluster, args)
+    elif args.action == 'remove':
+        cds.RemoveIOTripAnalyzerRecurrences(cluster, names=[args.name])
+        print("Recurrence %s has been removed successfully!" % args.name)
+    else:
+        config = cds.GetIOTripAnalyzerSchedulerConfig(cluster)
+        print("Recurrences of cluster %s:" % (args.clusterName))
+        print(config.recurrences)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/vsan-samples/vsanSpaceReportSamples.py b/vsan-samples/vsanSpaceReportSamples.py
new file mode 100644
index 00000000..38c66b9d
--- /dev/null
+++ b/vsan-samples/vsanSpaceReportSamples.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Copyright (c) 2016-2024 Broadcom. All Rights Reserved.
+Broadcom Confidential. The term "Broadcom" refers to Broadcom Inc.
+and/or its subsidiaries.
+
+This file includes sample code for accessing the vSAN space reporting
+API QuerySpaceUsage on both vCenter and ESXi.
+
+As an example of vSAN space reporting API access, it shows how to get the
+vSAN space usage result, including the following:
+vSAN total capacity;
+vSAN used capacity;
+vSAN free capacity;
+space saved by efficiency features such as deduplication and compression;
+vSAN space usage broken down by object type;
+...
+
+"""
+
+__author__ = 'Broadcom, Inc'
+
+import sys
+import ssl
+import atexit
+import argparse
+import getpass
+if sys.version[0] < '3':
+    input = raw_input
+sys.path.append("/usr/lib64/vmware-vpx/vsan-health/")
+sys.path.append("/usr/lib/vmware/site-packages/")
+from pyVmomi import vim, VmomiSupport, pbm
+import pyVim
+from pyVim.connect import SmartConnect, Disconnect
+
+# Import the vSAN API python bindings and utilities.
+import pyVmomi
+import vsanmgmtObjects
+import vsanapiutils
+
+def GetArgs():
+    """
+    Supports the command-line arguments listed below.
+    """
+    parser = argparse.ArgumentParser(
+        description=
+        'vSAN SDK sample application for vSAN space reporting API usage. '
+        'It queries the space usage information for the specified cluster '
+        'and prints out the current space usage information, including a '
+        'total capacity usage overview, space efficiency status and a '
+        'capacity usage breakdown.')
+    parser.add_argument('-v', '--vc', required=True, action='store',
+                        help='Remote vCenter Server to connect to')
+    parser.add_argument('-o', '--port', type=int, default=443, action='store',
+                        help='Port to connect on, default is 443')
+    parser.add_argument('-u', '--user', required=True, action='store',
+                        help='User name to use when connecting to '
+                             'vCenter Server')
+    parser.add_argument('-p', '--password', required=False, action='store',
+                        help='Password to use when connecting to vCenter '
+                             'Server. If not provided, you will be prompted '
+                             'to enter the password manually')
+    parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER",
+                        default='VSAN-Cluster',
+                        help='The name of the vSAN cluster on which the space '
+                             'usage query will be performed')
+    args = parser.parse_args()
+    return args
+
+def getClusterInstance(clusterName, serviceInstance):
+    content = serviceInstance.RetrieveContent()
+    searchIndex = content.searchIndex
+    datacenters = content.rootFolder.childEntity
+    for datacenter in datacenters:
+        cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName)
+        if cluster is not None:
+            return cluster
+    return None
+
+def connectToServers(args, sslContext):
+    """
+    Creates connections to the vCenter, vSAN and vSAN space reporting system
+    @param args
+    @return vc service instance, cluster, vSAN space reporting system
+    """
+    if args.password:
+        password = args.password
+    else:
+        password = getpass.getpass(prompt='Enter password for vc %s and '
+                                   'user %s: ' % (args.vc, args.user))
+
+    # Connect to vCenter, get vc service instance
+    si = SmartConnect(host=args.vc,
+                      user=args.user,
+                      pwd=password,
+                      port=int(args.port),
+                      sslContext=sslContext)
+
+    atexit.register(Disconnect, si)
+
+    # Get vSAN service instance stub
+    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.vc, int(args.port))
+    aboutInfo = si.content.about
+    if aboutInfo.apiType != 'VirtualCenter':
+        raise Exception("The sample script should be run against vc.")
+
+    vsanStub = vsanapiutils.GetVsanVcMos(si._stub,
+                                         context=sslContext,
+                                         version=apiVersion)
+
+    # Get vSAN cluster space report system
+    vss = vsanStub['vsan-cluster-space-report-system']
+
+    # Get cluster
+    cluster = getClusterInstance(args.clusterName, si)
+
+    return (si, cluster, vss)
+
+def bytesToTibBytes(byteSize):
+    tibSize = byteSize / (2**40)
+    return round(tibSize, 4)
+
+def main():
+    args = GetArgs()
+
+    # For python 2.7.9 and later, the default SSL context has stricter
+    # connection handshaking rules, hence we are turning off the hostname
+    # checking and client side cert verification.
+    sslContext = None
+    if sys.version_info[:3] > (2,7,8):
+        sslContext = ssl.create_default_context()
+        sslContext.check_hostname = False
+        sslContext.verify_mode = ssl.CERT_NONE
+
+    (si, cluster, vss) = connectToServers(args, sslContext)
+
+    if cluster is None:
+        print("Cluster %s is not found for %s" % (args.clusterName, args.vc))
+        return -1
+    else:
+        # Here is an example of how to get space reporting results
+        # by vSAN space reporting API.
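+        # QuerySpaceUsage returns a space usage result object whose capacity
+        # fields (e.g. totalCapacityB, freeCapacityB and the usedB values
+        # under spaceOverview/spaceDetail) are in bytes, hence the
+        # bytesToTibBytes() conversion helper above.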
+        spaceResult = vss.QuerySpaceUsage(cluster=cluster)
+
+        if not spaceResult:
+            print("Space result is None for the given cluster %s" % \
+                  args.clusterName)
+            return -1
+
+        print("vSAN Space Usage Overview")
+        print("Total vSAN Capacity: "
+              "%s TiB" % bytesToTibBytes(spaceResult.totalCapacityB))
+        print("Used vSAN Capacity: "
+              "%s TiB" % bytesToTibBytes(spaceResult.spaceOverview.usedB))
+        print("Free vSAN Capacity: "
+              "%s TiB" % bytesToTibBytes(spaceResult.freeCapacityB))
+
+        if hasattr(spaceResult, 'efficientCapacity') and \
+           spaceResult.efficientCapacity is not None:
+            print("vSAN efficiency (Deduplication / Compression) is enabled")
+            efficiencySavings = \
+                spaceResult.efficientCapacity.logicalCapacityUsed - \
+                spaceResult.efficientCapacity.physicalCapacityUsed
+            print("Space saved by Efficiency: "
+                  "%s TiB" % bytesToTibBytes(efficiencySavings))
+
+        if hasattr(spaceResult, 'spaceEfficiencyRatio') and \
+           spaceResult.spaceEfficiencyRatio is not None:
+            print("Space Efficiency Ratio: "
+                  "%sx" % spaceResult.spaceEfficiencyRatio.overallRatio)
+
+        spaceUsageByObjectType = spaceResult.spaceDetail.spaceUsageByObjectType
+        print("\nUsed Capacity Breakdown")
+        for objType in ['vdisk', 'vmswap', 'statsdb', 'namespace',
+                        'traceobject', 'esaObjectOverhead',
+                        'fileSystemOverhead']:
+            usedB = sum(obj.usedB for obj in spaceUsageByObjectType
+                        if obj.objType == objType)
+            print("%s: %s TiB" % (objType, bytesToTibBytes(usedB)))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/vsan-samples/vsanapisamples.py b/vsan-samples/vsanapisamples.py
new file mode 100644
index 00000000..d63055cb
--- /dev/null
+++ b/vsan-samples/vsanapisamples.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Copyright (c) 2016-2024 Broadcom. All Rights Reserved.
+The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+
+This file includes sample code for vCenter and ESXi side vSAN API access.
+
+To provide an example of vCenter side vSAN API access, it shows how to get vSAN
+cluster health status by invoking the QueryClusterHealthSummary() API of the
+VsanVcClusterHealthSystem MO.
+
+To provide an example of ESXi side vSAN API access, it shows how to get
+performance server related host information by invoking the
+VsanPerfQueryNodeInformation() API of the VsanPerformanceManager MO.
+
+"""
+
+__author__ = 'Broadcom, Inc'
+
+from pyVim.connect import SmartConnect, Disconnect
+import sys
+import ssl
+import atexit
+import argparse
+import getpass
+if sys.version[0] < '3':
+    input = raw_input
+
+# Import the vSAN API python bindings and utilities.
+import pyVmomi
+import vsanmgmtObjects
+import vsanapiutils
+
+def GetArgs():
+    """
+    Supports the command-line arguments listed below.
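+
+    Example invocation (hypothetical values):
+      vsanapisamples.py -s <vcenter-or-esxi-ip> -u <user> --cluster VSAN-Cluster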
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + args = parser.parse_args() + return args + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the host is vCenter or ESXi. + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + if aboutInfo.apiType == 'VirtualCenter': + + # Get vSAN health system from the vCenter Managed Object references. + vcMos = vsanapiutils.GetVsanVcMos( + si._stub, context=context, version=apiVersion) + vhs = vcMos['vsan-cluster-health-system'] + + cluster = getClusterInstance(args.clusterName, si) + + if cluster is None: + print("Cluster %s is not found for %s" % (args.clusterName, args.host)) + return -1 + + # vSAN cluster health summary can be cached at vCenter. + fetchFromCache = True + fetchFromCacheAnswer = input( + 'Do you want to fetch the cluster health from cache if exists?(y/n):') + if fetchFromCacheAnswer.lower() == 'n': + fetchFromCache = False + print('Fetching cluster health from cached state: %s' % + ('Yes' if fetchFromCache else 'No')) + healthSummary = vhs.QueryClusterHealthSummary( + cluster=cluster, includeObjUuids=True, fetchFromCache=fetchFromCache) + if hasattr(healthSummary, "healthScore"): + clusterScore = healthSummary.healthScore + print("Cluster %s Health Score: %s" % (args.clusterName, clusterScore)) + + clusterStatus = healthSummary.clusterStatus + for hostStatus in clusterStatus.trackedHostsStatus: + print("Host %s Status: %s" % (hostStatus.hostname, hostStatus.status)) + + # Here is an example of how to track a task returned by the vSAN API. + vsanTask = vhs.RepairClusterObjectsImmediate(cluster); + # Convert to vCenter task and bind the MO with vCenter session. 
+ vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub) + vsanapiutils.WaitForTasks([vcTask], si) + print('Repairing cluster objects task completed with state: %s' + % vcTask.info.state) + + if aboutInfo.apiType == 'HostAgent': + + # Get vSAN health system from the ESXi Managed Object references. + esxMos = vsanapiutils.GetVsanEsxMos( + si._stub, context=context, version=apiVersion) + vpm = esxMos['vsan-performance-manager'] + + nodeInfo = vpm.VsanPerfQueryNodeInformation()[0] + + print('Hostname: %s' % args.host) + print(' version: %s' % nodeInfo.version) + print(' isCmmdsMaster: %s' % nodeInfo.isCmmdsMaster) + print(' isStatsMaster: %s' % nodeInfo.isStatsMaster) + print(' vsanMasterUuid: %s' % nodeInfo.vsanMasterUuid) + print(' vsanNodeUuid: %s' % nodeInfo.vsanNodeUuid) + +if __name__ == "__main__": + main() diff --git a/vsan-samples/vsanclientsamples.py b/vsan-samples/vsanclientsamples.py new file mode 100644 index 00000000..0eec3623 --- /dev/null +++ b/vsan-samples/vsanclientsamples.py @@ -0,0 +1,216 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2016-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +This file includes sample code for vCenter vSAN API accessing. + +To provide an example of vCenter side vSAN API access, it shows how to run +Mount Precheck, Mount and Unmount a remote vSAN datastore using +VsanRemoteDatastoreSystem MO and check the health summary. + +""" + +__author__ = 'Broadcom, Inc' + +from pyVim.connect import SmartConnect, Disconnect +from pyVmomi import vim +import sys +import ssl +import atexit +import argparse +import getpass +if sys.version[0] < '3': + input = raw_input + +# Import the vSAN API python bindings and utilities. +import pyVmomi +import vsanmgmtObjects +import vsanapiutils + +def GetArgs(): + """ + Supports the command-line arguments listed below. + """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--servercluster', dest='serverClusterName', + metavar="CLUSTER") + parser.add_argument('--clientcluster', dest='clientClusterName', + metavar="CLUSTER") + args = parser.parse_args() + return args + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +def getRemoteDatastores(clusterRef): + """ + Get remote vsan datastore with cluster instance + @param clusterRef Given cluster reference + @return dsList Vsan datastores + """ + # Get vsan datastore + dsList = [ds for ds in clusterRef.datastore if ds.summary.type == 'vsan'] + if len(dsList) == 0: + print("ERROR: No vSAN datastore found") + return None + return dsList + +def verifyPrecheckFailedResult(result): + """ + For checking the MountPrecheck failed result in detail + E.g. 
a connectivity issue in the cluster, such as a cluster partition.
+    Red: Indicates severe warnings
+    Yellow: Indicates light warnings
+    Green: Indicates no warnings
+    """
+    status = True
+    for precheckItem in result.result:
+        if precheckItem.status == "red":
+            print('Precheck Item failed: %s' % precheckItem.type)
+            print(precheckItem.reason)
+            status = False
+    return status
+
+def getClusterInstanceHelper(clusterName, si, host):
+    if clusterName:
+        clusterInstance = getClusterInstance(clusterName, si)
+        if clusterInstance is None:
+            print("Cluster %s is not found for %s" % (clusterName, host))
+            return None
+    else:
+        print('Server or Client cluster name argument is not provided')
+        return None
+    return clusterInstance
+
+def mountUnmountDatastore(si, vsccs, cluster, dsList, vsanConfig, dsConfig):
+    spec = vim.vsan.ReconfigSpec(vsanClusterConfig=vsanConfig,
+                                 datastoreConfig=dsConfig)
+    vsanTask = vsccs.ReconfigureEx(cluster, spec)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print('Failed to (un)mount remote datastore with error: %s'
+              % vcTask.info.error)
+        return -1
+    print('Successfully (un)mounted remote vSAN datastore %s on cluster %s'
+          % (dsList[0].name, cluster.name))
+
+def main():
+    args = GetArgs()
+    if args.password:
+        password = args.password
+    else:
+        password = getpass.getpass(prompt='Enter password for host %s and '
+                                   'user %s: ' % (args.host,args.user))
+
+    # For python 2.7.9 and later, the default SSL context has stricter
+    # connection handshaking rules. We may need to turn off the hostname
+    # checking and client side cert verification.
+    context = None
+    if sys.version_info[:3] > (2,7,8):
+        context = ssl.create_default_context()
+        context.check_hostname = False
+        context.verify_mode = ssl.CERT_NONE
+
+    si = SmartConnect(host=args.host,
+                      user=args.user,
+                      pwd=password,
+                      port=int(args.port),
+                      sslContext=context)
+
+    atexit.register(Disconnect, si)
+
+    # Detecting whether the host is vCenter or ESXi.
+    aboutInfo = si.content.about
+    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port))
+    if aboutInfo.apiType == 'VirtualCenter':
+
+        # Get vSAN remote datastore system from the vCenter Managed
+        # Object references.
+        vcMos = vsanapiutils.GetVsanVcMos(
+            si._stub, context=context, version=apiVersion)
+        vrds = vcMos['vsan-remote-datastore-system']
+        # Get non-vSAN client cluster instance
+        clientCluster = getClusterInstanceHelper(args.clientClusterName,
+                                                 si, args.host)
+        # Get vSAN server cluster instance
+        serverCluster = getClusterInstanceHelper(args.serverClusterName,
+                                                 si, args.host)
+        if serverCluster is None or clientCluster is None:
+            return -1
+
+        # Put the 'client' (non-vSAN) cluster into compute mode
+        vccs = vcMos['vsan-cluster-config-system']
+        vccs.GetConfigInfoEx(clientCluster)
+        rs = vim.vsan.ReconfigSpec(mode=vim.vsan.Mode.Mode_Compute)
+        tsk = vccs.ReconfigureEx(clientCluster, rs)
+        tsk = vim.Task(tsk._moId, clientCluster._stub)
+        vsanapiutils.WaitForTasks([tsk], si)
+        print(tsk.info)
+
+        """
+        Mount/Unmount use a desired-state mechanism: the spec must contain
+        the full list of remote datastores that should end up mounted.
+        For a given spec:
+        Mount: The listed remote vSAN datastore(s) will be mounted on the
+        target vSAN/Compute mode cluster; datastores that are already
+        mounted are skipped.
+        Unmount: Any in-use remote vSAN datastore of the target vSAN/Compute
+        mode cluster that is not listed in the desired spec will be unmounted.
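+        For example, if datastores ds1 and ds2 are currently mounted and the
+        spec lists only [ds1], then ds2 is unmounted; an empty list (as in
+        the unmount step below) unmounts all remote datastores. (ds1/ds2 are
+        hypothetical names for illustration.)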
+ """ + + # Get remote datastore list from the server cluster + vsanDatastore = getRemoteDatastores(serverCluster) + + # Run MountPrecheck API and verify the result for failures + if vsanDatastore is not None: + remoteVsanDatastore = vsanDatastore[0] + print('Running MountPrecheck on cluster: %s' % clientCluster.name) + result = vrds.MountPrecheck(clientCluster, remoteVsanDatastore) + if verifyPrecheckFailedResult(result): + # if True: + vsccs = vcMos['vsan-cluster-config-system'] + vsanConfig = vim.vsan.cluster.ConfigInfo(enabled=None) + + # Mounting a remote datastore + print('Mounting remote datastore on cluster: %s' + % clientCluster.name) + dsConfig = vim.vsan.AdvancedDatastoreConfig( + remoteDatastores=vsanDatastore) + mountUnmountDatastore(si, vsccs, clientCluster, vsanDatastore, + vsanConfig, dsConfig) + + # Checking health summary of the compute cluster + chs = vcMos['vsan-cluster-health-system'] + health = chs.VsanQueryVcClusterHealthSummary(clientCluster) + print('Health summary of compute cluster %s: %s' % (clientCluster, health)) + + # Unmounting a remote datastore + print('Unmounting remote datastore from cluster: %s' + % clientCluster.name) + dsConfig = vim.vsan.AdvancedDatastoreConfig(remoteDatastores=[]) + mountUnmountDatastore(si, vsccs, clientCluster, vsanDatastore, + vsanConfig, dsConfig) + else: + print('Host provided should be a Virtual Center') + return -1 + +if __name__ == "__main__": + main() diff --git a/vsan-samples/vsanclustershutdownsamples.py b/vsan-samples/vsanclustershutdownsamples.py new file mode 100644 index 00000000..ff9e2e01 --- /dev/null +++ b/vsan-samples/vsanclustershutdownsamples.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2016-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +This file includes sample codes for vCenter and ESXi sides vSAN API accessing. + +To provide an example of vCenter side vSAN API access, it shows how to get vSAN +cluster health status by invoking the QueryClusterHealthSummary() API of the +VsanVcClusterHealthSystem MO and check host connection by invoking the GetRuntimeStats() +API of the VsanVcClusterConfigSystem MO. + +To provide an example of ESXi side vSAN API access, it shows how to power off/on cluster +by invoking the PerformClusterPowerAction() API of the VsanClusterPowerSystem MO. + +""" + +__author__ = 'Broadcom, Inc' + +from pyVim.connect import SmartConnect, Disconnect +import sys +import ssl +import atexit +import argparse +import getpass +import json +if sys.version[0] < '3': + input = raw_input + +# Import the vSAN API python bindings and utilities. +import pyVmomi +import vsanmgmtObjects +import vsanapiutils +from pyVmomi import vim +import datetime +from datetime import timezone + +def GetArgs(): + """ + Supports the command-line arguments listed below. 
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + parser.add_argument('--poweraction', required=True, dest='powerAction', + action='store') + args = parser.parse_args() + return args + + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + + +def precheckHealth(vchs, cluster): + print("Start cluster shutdown precheck") + healthData = vchs.QueryClusterHealthSummary( + cluster, perspective="clusterPowerOffPrecheck") + print("Start cluster shutdown precheck") + if healthData: + print("Overall health status:", healthData.overallHealth) + if healthData.overallHealth == 'green': + return True + elif healthData.overallHealthDescription: + print(healthData.overallHealthDescription) + else: + print("Failed to get health data.") + return False + # Check failed health tests + if healthData.groups is None or len(healthData.groups) == 0: + print("Groups are None") + return False + for group in healthData.groups: + if group.groupHealth != 'green': + for test in group.groupTests: + if test.testHealth != 'green': + print("FAIL:", test.testName) + return False + + +def precheckHostConnection(vccs, cluster): + print("Start cluster shutdown power on precheck") + stats = vccs.GetRuntimeStats(cluster) + disconnectedHosts = [] + for host in stats: + if host.stats is None or not host.stats: + disconnectedHosts.append(host.host) + if len(disconnectedHosts) > 0: + print("Disconnected hosts:", disconnectedHosts) + return False + return True + + +def powerOnCluster(si, vccs, vcps, cluster): + if not precheckHostConnection(vccs, cluster): + return -1 + powerActionCluster(si, vcps, cluster, "clusterPoweredOn") + + +def powerOffCluster(si, vchs, vcps, cluster): + if not precheckHealth(vchs, cluster): + return -1 + powerActionCluster(si, vcps, cluster, "clusterPoweredOff") + + +def powerActionCluster(si, vcps, cluster, action): + cspec = vim.cluster.PerformClusterPowerActionSpec() + + if action == "clusterPoweredOn": + cspec.targetPowerStatus = "clusterPoweredOn" + elif action == "clusterPoweredOff": + cspec.targetPowerStatus = "clusterPoweredOff" + cspec.powerOffReason = "Scheduled maintenance" + + vsanTask = vcps.PerformClusterPowerAction(cluster, cspec) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub) + print('Start %s...' % cspec.targetPowerStatus) + vsanapiutils.WaitForTasks([vcTask], si) + print('Finish.') + + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. 
+    # checking and client side cert verification.
+    context = None
+    if sys.version_info[:3] > (2,7,8):
+        context = ssl.create_default_context()
+        context.check_hostname = False
+        context.verify_mode = ssl.CERT_NONE
+
+    si = SmartConnect(host=args.host,
+                      user=args.user,
+                      pwd=password,
+                      port=int(args.port),
+                      sslContext=context)
+
+    atexit.register(Disconnect, si)
+
+    # Detecting whether the host is vCenter or ESXi.
+    aboutInfo = si.content.about
+    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port))
+    if aboutInfo.apiType == 'VirtualCenter':
+
+        # Get vSAN health system from the vCenter Managed Object references.
+        vcMos = vsanapiutils.GetVsanVcMos(
+            si._stub, context=context, version=apiVersion)
+        vcps = vcMos['vsan-cluster-power-system']
+        vchs = vcMos['vsan-cluster-health-system']
+        vccs = vcMos['vsan-cluster-config-system']
+
+        cluster = getClusterInstance(args.clusterName, si)
+        if cluster is None:
+            print("Cluster %s is not found for %s" % (args.clusterName, args.host))
+            return -1
+
+        powerAction = args.powerAction
+        if powerAction == "poweroff":
+            powerOffCluster(si, vchs, vcps, cluster)
+        elif powerAction == "poweron":
+            powerOnCluster(si, vccs, vcps, cluster)
+        else:
+            print("Invalid power action.")
+
+    else:
+        print("Invalid host, please provide the vCenter Server IP")
+
+if __name__ == "__main__":
+    main()
diff --git a/vsan-samples/vsancnsfilesamples.py b/vsan-samples/vsancnsfilesamples.py
new file mode 100644
index 00000000..518e95cd
--- /dev/null
+++ b/vsan-samples/vsancnsfilesamples.py
@@ -0,0 +1,464 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Copyright (c) 2019-2024 Broadcom. All Rights Reserved.
+The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+
+This file includes sample code for managing file volumes using the vSAN
+Cloud Native Storage API.
+
+To provide an example of vSAN CNS API access, it shows how to create a
+CNS file volume, query a CNS file volume, update file volume metadata,
+and delete a CNS file volume.
+
+NOTE: using the vSAN CNS API for file volumes requires a minimal
+vsan.version.version12 Stub.
+
+"""
+
+__author__ = 'Broadcom, Inc'
+from pyVim.connect import SmartConnect, Disconnect
+from pyVmomi import vim, pbm, VmomiSupport, SoapStubAdapter
+import sys
+import ssl
+import atexit
+import argparse
+import getpass
+
+# Import the vSAN API python bindings
+import vsanapiutils
+
+# Users can customize the parameters below according to their own environment
+DOMAIN_NAME = "VSANFS-PA.PRV"
+IP_FQDN_DIC = {"192.168.111.2": "h192-168-111-2.example.com",
+               "192.168.111.3": "h192-168-111-3.example.com",
+               "192.168.111.4": "h192-168-111-4.example.com",
+               "192.168.111.5": "h192-168-111-5.example.com"}
+SUBNET_MASK = "255.255.255.0"
+GATEWAY_ADDRESS = "192.168.111.1"
+DNS_SUFFIXES = ["example.com"]
+DNS_ADDRESS = ["1.2.3.4"]
+
+def GetArgs():
+    """
+    Supports the command-line arguments listed below.
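+
+    Example invocation (hypothetical values):
+      vsancnsfilesamples.py -s <vc-ip> -u <user> -p <password> --cluster VSAN-Cluster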
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN file service sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=True, action='store', + help='Password to use when connecting to host') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + args = parser.parse_args() + return args + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +def getVsanDatastore(clusterName, serviceInstance): + """ + Get vsan datastore with cluster instance + @param clusterName Given cluster name + @param vcServiceInst Vc service instance + @return dsList Vsan datastores + """ + # Get cluster reference + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + clusterRef = None + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + clusterRef = cluster + break + if clusterRef is None: + print("ERROR: Cluster {0} is not found".format(clusterName)) + return None + + # Get vsan datastore + dsList = [ds for ds in clusterRef.datastore if ds.summary.type == 'vsan'] + if len(dsList) == 0: + print("ERROR: No vSAN datastore found") + return None + return dsList + +def getFileServiceDomainConfig(): + networkProfiles = [] + for ipAddress, fqdn in IP_FQDN_DIC.items(): + networkProfile = vim.vsan.FileServiceIpConfig( + dhcp=False, ipAddress=ipAddress, subnetMask=SUBNET_MASK, + gateway=GATEWAY_ADDRESS, fqdn=fqdn) + networkProfiles.append(networkProfile) + networkProfiles[0].isPrimary = True + + fileServiceDomainConfig = vim.vsan.FileServiceDomainConfig( + name = DOMAIN_NAME, + dnsServerAddresses = DNS_ADDRESS, + dnsSuffixes = DNS_SUFFIXES, + fileServerIpConfig = networkProfiles) + + return fileServiceDomainConfig + +def prepareFileVolumeCreateSpec(args, volumeName, datastores=None): + """ + Creates file volume createSpec for create api + @param args + @param volumeName Volume name + @param datastores Array of datastore + @return createSpec Specifications for volumes to be created + """ + clusterId1 = "k8_cls_1" + podEntityType = "POD" + pvcEntityType = "PERSISTENT_VOLUME_CLAIM" + pvEntityType = "PERSISTENT_VOLUME" + podEntityName1 = "test-pod1" + pvcEntityName1 = "test-pvc1" + pvEntityName1 = "test-pv1" + nameSpace = "default" + pvcLabels1 = [vim.KeyValue(key="PVCkey1", value="PVCvalue1")] + pvLabels1 = [vim.KeyValue(key="PVkey1", value="PVvalue1")] + + containerCluster = vim.cns.ContainerCluster() + containerCluster.clusterType = "KUBERNETES" + containerCluster.clusterId = clusterId1 + containerCluster.vSphereUser = args.user + + backingOption = vim.cns.VsanFileShareBackingDetails() + backingOption.capacityInMb = 1024L + createSpec = vim.cns.VolumeCreateSpec() + createSpec.name = volumeName + createSpec.volumeType = "FILE" + + netPermission = 
+        [vim.vsan.FileShareNetPermission(
+            ips='*',
+            permissions=vim.vsan.FileShareAccessType.READ_WRITE,
+            allowRoot=True)]
+    cnsFileCreateSpec = vim.cns.VSANFileCreateSpec()
+    cnsFileCreateSpec.softQuotaInMb = 100
+    cnsFileCreateSpec.permission = netPermission
+    createSpec.createSpec = cnsFileCreateSpec
+
+    referredEntityToPVC1 = vim.cns.KubernetesEntityReference()
+    referredEntityToPVC1.entityType = pvcEntityType
+    referredEntityToPVC1.entityName = pvcEntityName1
+    referredEntityToPVC1.namespace = nameSpace
+    referredEntityToPVC1.clusterId = clusterId1
+    referredEntityToPV1 = vim.cns.KubernetesEntityReference()
+    referredEntityToPV1.entityType = pvEntityType
+    referredEntityToPV1.entityName = pvEntityName1
+    referredEntityToPV1.clusterId = clusterId1
+    podMetaData1 = prepareK8sEntityMetaData(
+        entityName=podEntityName1, clusterId=clusterId1,
+        namespace=nameSpace,
+        entityType=podEntityType,
+        referredEntity=[referredEntityToPVC1])
+    pvcMetaData1 = prepareK8sEntityMetaData(
+        entityName=pvcEntityName1, clusterId=clusterId1,
+        namespace=nameSpace,
+        entityType=pvcEntityType, labelkv=pvcLabels1,
+        referredEntity=[referredEntityToPV1])
+    pvMetaData1 = prepareK8sEntityMetaData(
+        entityName=pvEntityName1, clusterId=clusterId1,
+        entityType=pvEntityType, labelkv=pvLabels1)
+    metadata = vim.cns.VolumeMetadata()
+    metadata.containerCluster = containerCluster
+    metadata.containerClusterArray = [containerCluster]
+    metadata.entityMetadata = [podMetaData1, pvcMetaData1, pvMetaData1]
+
+    createSpec.metadata = metadata
+    createSpec.backingObjectDetails = backingOption
+    createSpec.datastores = []
+    if datastores:
+        createSpec.datastores.extend(datastores)
+
+    createSpecs = []
+    createSpecs.append(createSpec)
+    return createSpecs
+
+def prepareFileVolumeMetadataUpdateSpec(args, volumeId):
+    """
+    Creates the file volume updateSpec for the metadata update API
+    @param args
+    @param volumeId Volume Id
+    @return updateSpecs Specifications for volumes to be updated
+    """
+    clusterId1 = "k8_cls_1"
+    clusterId2 = "k8_cls_2"
+    nameSpace = "default"
+    clusterType = "KUBERNETES"
+    podEntityType = "POD"
+    pvcEntityType = "PERSISTENT_VOLUME_CLAIM"
+    pvEntityType = "PERSISTENT_VOLUME"
+    podEntityName1 = "test-pod1"
+    podEntityName2 = "test-pod2"
+    pvcEntityName1 = "test-pvc1"
+    pvcEntityName2 = "test-pvc2"
+    pvEntityName1 = "test-pv1"
+    pvcLabels1 = [vim.KeyValue(key="PVCkey1", value="PVCvalue1")]
+    pvcLabels2 = [vim.KeyValue(key="PVCkey2", value="PVCvalue2")]
+    pvLabels1 = [vim.KeyValue(key="PVkey1", value="PVvalue1")]
+
+    updateSpec = vim.cns.VolumeMetadataUpdateSpec()
+    updateSpec.volumeId = volumeId
+    metadata = vim.cns.VolumeMetadata()
+
+    containerCluster1 = vim.cns.ContainerCluster()
+    containerCluster1.clusterType = clusterType
+    containerCluster1.clusterId = clusterId1
+    containerCluster1.vSphereUser = args.user
+    containerCluster2 = vim.cns.ContainerCluster()
+    containerCluster2.clusterType = clusterType
+    containerCluster2.clusterId = clusterId2
+    containerCluster2.vSphereUser = args.user
+    metadata.containerCluster = containerCluster1
+    metadata.containerClusterArray = [containerCluster1, containerCluster2]
+
+    referredEntityToPVC1 = vim.cns.KubernetesEntityReference()
+    referredEntityToPVC1.entityType = pvcEntityType
+    referredEntityToPVC1.entityName = pvcEntityName1
+    referredEntityToPVC1.namespace = nameSpace
+    referredEntityToPVC1.clusterId = clusterId1
+    referredEntityToPVC2 = vim.cns.KubernetesEntityReference()
+    referredEntityToPVC2.entityType = pvcEntityType
+    referredEntityToPVC2.entityName = pvcEntityName2
+
referredEntityToPVC2.namespace = nameSpace + referredEntityToPVC2.clusterId = clusterId2 + referredEntityToPV1 = vim.cns.KubernetesEntityReference() + referredEntityToPV1.entityType = pvEntityType + referredEntityToPV1.entityName = pvEntityName1 + referredEntityToPV1.clusterId = clusterId1 + + podMetaData1 = prepareK8sEntityMetaData( + entityName=podEntityName1, clusterId=clusterId1, + namespace=nameSpace, + entityType=podEntityType, + referredEntity=[referredEntityToPVC1]) + podMetaData2 = prepareK8sEntityMetaData( + entityName=podEntityName2, clusterId=clusterId2, + namespace=nameSpace, + entityType=podEntityType, + referredEntity=[referredEntityToPVC2]) + pvcMetaData1 = prepareK8sEntityMetaData( + entityName=pvcEntityName1, clusterId=clusterId1, + namespace=nameSpace, + entityType=pvcEntityType, labelkv=pvcLabels1, + referredEntity=[referredEntityToPV1]) + pvcMetaData2 = prepareK8sEntityMetaData( + entityName=pvcEntityName2, clusterId=clusterId2, + namespace=nameSpace, + entityType=pvcEntityType, labelkv=pvcLabels2, + referredEntity=[referredEntityToPV1]) + pvMetaData1 = prepareK8sEntityMetaData( + entityName=pvEntityName1, clusterId=clusterId1, + entityType=pvEntityType, labelkv=pvLabels1) + + metadata.entityMetadata = [podMetaData1, podMetaData2, pvcMetaData1, + pvcMetaData2, pvMetaData1] + updateSpec.metadata = metadata + updateSpecs = [] + updateSpecs.append(updateSpec) + return updateSpecs + +def prepareK8sEntityMetaData(entityName, entityType, namespace=None, + deleteFlag=None, labelkv=None, + clusterId=None, referredEntity=None): + k8sEntityMetaData = vim.cns.KubernetesEntityMetadata() + if namespace is not None: + k8sEntityMetaData.namespace = namespace + k8sEntityMetaData.entityType = entityType + k8sEntityMetaData.entityName = entityName + if clusterId is not None: + k8sEntityMetaData.clusterId = clusterId + if deleteFlag is None: + k8sEntityMetaData.delete = False + else: + k8sEntityMetaData.delete = deleteFlag + if labelkv is not None: + k8sEntityMetaData.labels = labelkv + if referredEntity is not None: + k8sEntityMetaData.referredEntity = referredEntity + return k8sEntityMetaData + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for VC %s and ' + 'user %s: ' % (args.host, args.user)) + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname + # checking and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + # Connect to vCenter, get vc service instance + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + atexit.register(Disconnect, si) + + # Detecting whether the host is vCenter or ESXi. 
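+    # about.apiType distinguishes the endpoint: 'VirtualCenter' for vCenter
+    # Server and 'HostAgent' for ESXi. The CNS file volume APIs used below
+    # are only exposed through vCenter, hence the check that follows.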
+    aboutInfo = si.content.about
+    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port))
+
+    if aboutInfo.apiType != 'VirtualCenter':
+        print("The vSAN file service APIs are only available on vCenter")
+        return -1
+
+    datastores = getVsanDatastore(args.clusterName, si)
+    if datastores is None or len(datastores) == 0:
+        print("vsan datastore is not found for %s" % (args.host))
+        return -1
+
+    cluster = getClusterInstance(args.clusterName, si)
+    if cluster is None:
+        print("Cluster %s is not found for %s" % (args.clusterName, args.host))
+        return -1
+
+    vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context,
+                                      version=apiVersion)
+    vcfs = vcMos['vsan-cluster-file-service-system']
+    vccs = vcMos['vsan-cluster-config-system']
+    volmgr = vcMos['cns-volume-manager']
+
+    # Find OVF download url
+    print("Finding OVF download url ...")
+    ovfUrl = vcfs.FindOvfDownloadUrl(cluster)
+    if not ovfUrl:
+        print("Failed to find the OVF download url.")
+        return -1
+    print("Found OVF download url: %s" % ovfUrl)
+
+    # Download FSVM OVF files to vCenter
+    print("Downloading ovf files from %s to vCenter ..." % ovfUrl)
+    vsanTask = vcfs.DownloadFileServiceOvf(downloadUrl=ovfUrl)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print("Failed to download ovf files with error: %s"
+              % vcTask.info.error)
+        return -1
+    print("Downloaded ovf files to vCenter successfully")
+
+    # Enable file service
+    print("Enabling the file service")
+    network = cluster.host[0].network[0]
+    fileServiceConfig = vim.vsan.FileServiceConfig(
+        enabled=True,
+        network=network,
+        domains=[],
+    )
+    clusterSpec = vim.vsan.ReconfigSpec(fileServiceConfig=fileServiceConfig)
+    vsanTask = vccs.ReconfigureEx(cluster, clusterSpec)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print("Failed to enable file service with error: %s"
+              % vcTask.info.error)
+        return -1
+    print("Enabled file service successfully")
+
+    # Create file service domain
+    fsDomainConfig = getFileServiceDomainConfig()
+    domainName = fsDomainConfig.name
+    print("Creating file service domain")
+    vsanTask = vcfs.CreateFileServiceDomain(fsDomainConfig, cluster)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print("Failed to create file service domain with error: %s"
+              % vcTask.info.error)
+        return -1
+    print("Created file service domain %s successfully"
+          % domainName)
+
+    # Create file volume
+    volumeName = "file_volume_sdk_test"
+    print("Creating file volume: %s" % volumeName)
+    createSpecs = prepareFileVolumeCreateSpec(args,
+                                              volumeName=volumeName,
+                                              datastores=datastores)
+    cnsCreateTask = volmgr.Create(createSpecs)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(cnsCreateTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    print('Create CNS file volume task finished with status: %s' %
+          vcTask.info.state)
+    if vcTask.info.error is not None:
+        print("Create CNS file volume failed with error %s"
+              % vcTask.info.error)
+        return -1
+
+    # Query CNS volume
+    print("Querying file volume with volumeName: %s" % volumeName)
+    filterSpec = vim.cns.QueryFilter()
+    filterSpec.names = [volumeName]
+    volumeQueryResult = volmgr.Query(filterSpec)
+    print("CNS query result: {}".format(volumeQueryResult))
+    if volumeQueryResult is None:
+        print("ERROR: Query CNS
volume failed, result is %s" + % volumeQueryResult) + return -1 + volumeId = volumeQueryResult.volumes[0].volumeId + + # Update volume metadata + print("Updating file volume metadata with volumeId: %s" % volumeId.id) + updateSpecs = prepareFileVolumeMetadataUpdateSpec(args, volumeId) + cnsUpdateTask = volmgr.UpdateVolumeMetadata(updateSpecs) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(cnsUpdateTask, si._stub) + vsanapiutils.WaitForTasks([vcTask], si) + print(('Update CNS file volume task finished with status: %s' % + vcTask.info.state)) + if vcTask.info.error is not None: + print("Update CNS file volume failed with error %s" + % vcTask.info.error) + return -1 + + # Delete CNS volume + print("Deleting file volume with volumeId: %s" % volumeId.id) + cnsDeleteTask = volmgr.Delete([volumeId], deleteDisk=True) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(cnsDeleteTask, si._stub) + vsanapiutils.WaitForTasks([vcTask], si) + print(('Delete CNS volume task finished with status: %s' + % vcTask.info.state)) + if vcTask.info.error is not None: + print("Delete CNS volume failed with error %s" + % vcTask.info.error) + return -1 + + # Disable file service + print("Disabling file service") + fileServiceConfig = vim.vsan.FileServiceConfig(enabled=False) + clusterSpec = vim.vsan.ReconfigSpec(fileServiceConfig=fileServiceConfig) + vsanTask = vccs.ReconfigureEx(cluster, clusterSpec) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub) + vsanapiutils.WaitForTasks([vcTask], si) + if vcTask.info.state != 'success': + print("Failed to disable file service with error: %s" + % vcTask.info.error) + return -1 + print("Disabled file service successfully") + + +if __name__ == "__main__": + main() diff --git a/vsan-samples/vsancnssamples.py b/vsan-samples/vsancnssamples.py new file mode 100644 index 00000000..fc7c6b68 --- /dev/null +++ b/vsan-samples/vsancnssamples.py @@ -0,0 +1,210 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2019-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +This file includes sample code for the vSAN Cloud Native Storage API. + +To provide an example of vSAN CNS API access, it shows how to create CNS volume, +query CNS volume, together with delete CNS volume. + +NOTE: using vSAN CNS API requires a minimal vim.version.version11 Stub. 
+ +usage: vsancnssamples.py [-h] -s HOST [-o PORT] -u USER [-p PASSWORD] [--cluster CLUSTER] + -h, --help show this help message and exit + -s HOST, --host HOST Remote vCenter host to connect to + -o PORT, --port PORT Port to connect on + -u USER, --user USER User name to use when connecting to host + -p PASSWORD, --password PASSWORD + Password to use when connecting to host + --cluster CLUSTER + +""" + +__author__ = 'Broadcom, Inc' + +import sys +import ssl +import atexit +import argparse +import getpass +import vsanapiutils + +from pyVmomi import vim +from pyVim.connect import SmartConnect, Disconnect + +import pyVmomi +import vsanmgmtObjects + +if sys.version[0] < '3': + input = raw_input + +def main(): + args = GetArgs() + + # Create connection and get vc service instance and CNS volume manager stub + (vcServiceInst, cnsVolumeManager) = connectToServers(args) + + # Create CNS volume + volumeName = "volume_sdk_test" + datastores = GetVsanDatastore(args.clusterName, vcServiceInst) + createSpecs = PrepareVolumeCreateSpec(args, + volumeName=volumeName, + datastores=datastores) + cnsCreateTask = cnsVolumeManager.Create(createSpecs) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(cnsCreateTask, vcServiceInst._stub) + vsanapiutils.WaitForTasks([vcTask], vcServiceInst) + print(('Create CNS volume task finished with status: %s' % + vcTask.info.state)) + if vcTask.info.error is not None: + msg = "Create CNS volume failed with error '{0}'".format(vcTask.info.error) + sys.exit(msg) + + # Query CNS volume + filterSpec = vim.cns.QueryFilter() + filterSpec.names = [volumeName] + volumeQueryResult = cnsVolumeManager.Query(filterSpec) + print("CNS query result: {}".format(volumeQueryResult)) + if volumeQueryResult is None: + msg = "ERROR: Query CNS volume failed. result is \n {0}".format(volumeQueryResult) + sys.exit(msg) + volumeId = volumeQueryResult.volumes[0].volumeId + + # Delete CNS volume + cnsDeleteTask = cnsVolumeManager.Delete([volumeId], deleteDisk=True) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(cnsDeleteTask, vcServiceInst._stub) + vsanapiutils.WaitForTasks([vcTask], vcServiceInst) + print(('Delete CNS volume task finished with status: %s' % + vcTask.info.state)) + if vcTask.info.error is not None: + msg = "Delete CNS volume failed with error '{0}'".format(vcTask.info.error) + sys.exit(msg) + +def GetArgs(): + """ + Supports the command-line arguments listed below. 
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote vCenter host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + args = parser.parse_args() + return args + +def connectToServers(args): + """ + Creates connections to the vCenter, vSAN and CNS volume manager + @param args + @return vc service instance, CNS volume manager stub + """ + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has stricter + # connection handshaking rule, hence we are turning off the hostname checking + # and client side cert verification. + sslContext = None + if sys.version_info[:3] > (2,7,8): + sslContext = ssl.create_default_context() + sslContext.check_hostname = False + sslContext.verify_mode = ssl.CERT_NONE + + # Connect to vCenter, get vc service instance + vcServiceInst = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=sslContext) + atexit.register(Disconnect, vcServiceInst) + + # get vSAN service instance stub + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + vsanStub = vsanapiutils.GetVsanVcMos(vcServiceInst._stub, + context = sslContext, + version = apiVersion) + + # get CNS volume manager stub + cnsVolumeManager = vsanStub['cns-volume-manager'] + + return (vcServiceInst, cnsVolumeManager) + +def GetVsanDatastore(clusterName, vcServiceInst): + """ + Get vsan datastore with cluster instance + @param clusterName Given cluster name + @param vcServiceInst Vc service instance + @return dsList Vsan datastores + """ + # get cluster reference + content = vcServiceInst.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + clusterRef = None + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + clusterRef = cluster + break + if clusterRef is None: + msg = "ERROR: Cluster {0} is not found".format(clusterName) + sys.exit(msg) + + # get vsan datastore + dsList = [ds for ds in clusterRef.datastore if ds.summary.type == 'vsan'] + if len(dsList) == 0: + msg = "ERROR: No vSAN datastore found" + sys.exit(msg) + return dsList + +def PrepareVolumeCreateSpec(args, volumeName, datastores=None): + """ + Creates createSpec for create api + @param args + @param volumeName Volume name + @param datastores Array of datastore + @return createSpec Specifications for volumes to be created + """ + containerCluster = vim.cns.ContainerCluster() + containerCluster.clusterType = "KUBERNETES" + containerCluster.clusterId = "k8_cls_1" + containerCluster.vSphereUser = args.user + backingOption = vim.cns.BlockBackingDetails() + backingOption.capacityInMb = 1024L + createSpec = vim.cns.VolumeCreateSpec() + createSpec.name = volumeName + createSpec.volumeType = "BLOCK" + + metadata = vim.cns.VolumeMetadata() + metadata.containerCluster 
+    k8sEntityMetaData = vim.cns.KubernetesEntityMetadata()
+    k8sEntityMetaData.namespace = "default"
+    k8sEntityMetaData.entityType = "PERSISTENT_VOLUME_CLAIM"
+    k8sEntityMetaData.entityName = "test-pvc"
+    k8sEntityMetaData.delete = False
+    metadata.entityMetadata = [k8sEntityMetaData]
+
+    createSpec.metadata = metadata
+    createSpec.backingObjectDetails = backingOption
+    createSpec.datastores = []
+    if datastores:
+        createSpec.datastores.extend(datastores)
+
+    createSpecs = []
+    createSpecs.append(createSpec)
+    return createSpecs
+
+if __name__ == "__main__":
+    main()
diff --git a/vsan-samples/vsandataintransitencryptionsamples.py b/vsan-samples/vsandataintransitencryptionsamples.py
new file mode 100644
index 00000000..54f83c2c
--- /dev/null
+++ b/vsan-samples/vsandataintransitencryptionsamples.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Copyright (c) 2020-2024 Broadcom. All Rights Reserved.
+The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+
+This file includes sample code for vCenter side vSAN Data-In-Transit
+Encryption API access.
+
+To provide an example of vCenter side vSAN Data-In-Transit
+Encryption API access, it shows how to set the data-in-transit encryption
+setting on a given cluster by invoking the ReconfigureEx() API of the
+VsanVcClusterConfigSystem MO.
+
+"""
+
+__author__ = 'Broadcom, Inc'
+
+from pyVim.connect import SmartConnect, Disconnect
+from pyVmomi import vim
+import sys
+import ssl
+import atexit
+import argparse
+import getpass
+from distutils.version import LooseVersion
+
+if sys.version[0] < '3':
+    input = raw_input
+
+# Import the vSAN API python bindings and utilities.
+import pyVmomi
+import vsanmgmtObjects
+import vsanapiutils
+
+def GetArgs():
+    """
+    Supports the command-line arguments listed below.
+    """
+    parser = argparse.ArgumentParser(
+        description='Process args for vSAN SDK sample application')
+    parser.add_argument('-s', '--host', required=True, action='store',
+                        help='Remote host to connect to')
+    parser.add_argument('-o', '--port', type=int, default=443, action='store',
+                        help='Port to connect on')
+    parser.add_argument('-u', '--user', required=True, action='store',
+                        help='User name to use when connecting to host')
+    parser.add_argument('-p', '--password', required=False, action='store',
+                        help='Password to use when connecting to host')
+    parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER",
+                        default='VSAN-Cluster')
+    args = parser.parse_args()
+    return args
+
+def getClusterInstance(clusterName, serviceInstance):
+    content = serviceInstance.RetrieveContent()
+    searchIndex = content.searchIndex
+    datacenters = content.rootFolder.childEntity
+    for datacenter in datacenters:
+        cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName)
+        if cluster is not None:
+            return cluster
+    return None
+
+def main():
+    args = GetArgs()
+    if args.password:
+        password = args.password
+    else:
+        password = getpass.getpass(prompt='Enter password for host %s and '
+                                   'user %s: ' % (args.host,args.user))
+
+    # For python 2.7.9 and later, the default SSL context has stricter
+    # connection handshaking rules. We may need to turn off the hostname
+    # checking and client side cert verification.
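+    # This is acceptable for a lab environment; a hardened deployment would
+    # keep verification enabled and pass a CA bundle instead, e.g. via
+    # ssl.create_default_context(cafile='<path-to-ca.pem>') (hypothetical
+    # path shown for illustration).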
+    context = None
+    if sys.version_info[:3] > (2,7,8):
+        context = ssl.create_default_context()
+        context.check_hostname = False
+        context.verify_mode = ssl.CERT_NONE
+
+    si = SmartConnect(host=args.host,
+                      user=args.user,
+                      pwd=password,
+                      port=int(args.port),
+                      sslContext=context)
+
+    atexit.register(Disconnect, si)
+
+    # Detect whether the host is vCenter or ESXi.
+    aboutInfo = si.content.about
+    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port))
+
+    if aboutInfo.apiType == 'VirtualCenter':
+        majorApiVersion = aboutInfo.apiVersion
+        if LooseVersion(majorApiVersion) < LooseVersion('6.7.1'):
+            print('The Virtual Center with version %s (lower than 6.7U3) is '
+                  'not supported.' % aboutInfo.apiVersion)
+            return -1
+
+        # Get the vSAN cluster config system from the vCenter Managed Object
+        # references.
+        vcMos = vsanapiutils.GetVsanVcMos(
+            si._stub, context=context, version=apiVersion)
+        vccs = vcMos['vsan-cluster-config-system']
+
+        cluster = getClusterInstance(args.clusterName, si)
+
+        if cluster is None:
+            print('Cluster %s is not found for %s' % (args.clusterName, args.host))
+            return -1
+
+        clusterReconfigSpec = vim.vsan.ReconfigSpec()
+        clusterReconfigSpec.dataInTransitEncryptionConfig = \
+            vim.vsan.DataInTransitEncryptionConfig()
+
+        # Set to True to enable and False to disable data-in-transit
+        # encryption. If the value is left unset, the current state is
+        # kept unchanged.
+        clusterReconfigSpec.dataInTransitEncryptionConfig.enabled = True
+
+        # Periodic rekeying interval in minutes.
+        # The default interval is 1440, i.e. 24 hours.
+        # For release builds, the minimal interval is 30 minutes
+        # and the maximum is 10080, i.e. 7 days.
+        clusterReconfigSpec.dataInTransitEncryptionConfig.rekeyInterval = 30
+
+        ditEncryptionConfigTask = vccs.ReconfigureEx(cluster, clusterReconfigSpec)
+
+        ditEncryptionConfigVcTask = vsanapiutils.ConvertVsanTaskToVcTask(
+            ditEncryptionConfigTask, si._stub)
+        vsanapiutils.WaitForTasks([ditEncryptionConfigVcTask], si)
+
+        print('Set vSAN data-in-transit encryption finished with '
+              'status: %s' % ditEncryptionConfigVcTask.info.state)
+    else:
+        print('This sample should be run against a vCenter Server.')
+        return -1
+
+if __name__ == "__main__":
+    main()
diff --git a/vsan-samples/vsandeployersamples.py b/vsan-samples/vsandeployersamples.py
new file mode 100644
index 00000000..c369562e
--- /dev/null
+++ b/vsan-samples/vsandeployersamples.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Copyright (c) 2022-2024 Broadcom. All Rights Reserved.
+The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+
+This file includes sample codes for vCenter and ESXi sides vSAN API accessing.
+
+To provide an example of ESXi side vSAN API access, it shows how to bootstrap
+vSAN ESA from a single ESXi host by invoking the VsanPrepareVsanForVcsa() API
+of the VsanVcsaDeployerSystem MO.
+
+To provide an example of VCSA side vSAN API access, it shows how to complete
+vSAN ESA bootstrapping on the VCSA by invoking the VsanPostConfigForVcsa() API
+of the VsanVcsaDeployerSystem MO.
+"""
+
+__author__ = 'Broadcom, Inc'
+
+from pyVim.connect import SmartConnect, Disconnect
+import sys
+import ssl
+import atexit
+import argparse
+import getpass
+import pyVmomi
+from pyVmomi import vim
+from datetime import datetime
+import time
+if sys.version[0] < '3':
+    input = raw_input
+
+# Import the vSAN API python bindings and utilities.
+import vsanmgmtObjects
+import vsanapiutils
+
+def GetArgs():
+    """
+    Supports the command-line arguments listed below.
+ """ + parser = argparse.ArgumentParser(description= +''' +General workflow of VC on vSAN bootstrapping: + + 1. Use VsanPrepareVsanForVcsa() API to setup vSAN / vSAN ESA datastore on a fresh ESXi host. + E.g.: ./vsandeployersamples.py -s -u -p + + 2. Use tools like ovftool to deploy VCSA onto the vSAN / vSAN ESA datastore on previous ESXi host + + 3. Use VsanPostConfigForVcsa() API to setup the newly installed VCSA, that includes: + a) creating datacenter & cluster; + b) add the first host & remaining hosts into the cluster, etc. + E.g.: ./vsandeployersamples.py -s -u -p --datacenter + --cluster --esxIPs --esxIPs + --esxUserName --esxPassword + +Process args for vSAN SDK sample application: +''', formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--datacenter', required=False, dest='dcName', + metavar="DATACENTER", default='DataCenter', + help='DataCenter to be created') + parser.add_argument('--cluster', required=False, dest='clusterName', + metavar="CLUSTER", default='vSAN-ESA-Cluster', + help='Cluster to be created') + parser.add_argument('--esxIPs', required=False, dest='esxIPs', nargs='*', + metavar="ESXIPS", help='ESX IP to be added into the cluster. Note: The first host must be the one previously bootstrapped vSAN') + parser.add_argument('--esxUserName', required=False, dest='esxUserName', + metavar="ESXUSERNAME", help='username of the ESX to be added into the cluster') + parser.add_argument('--esxPassword', required=False, dest='esxPassword', + metavar="ESXPASSWORD", help='password of the ESX to be added into the cluster') + args = parser.parse_args() + return args + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the host is vCenter or ESXi. + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host) + + + if aboutInfo.apiType == 'HostAgent': + folder = si.content.rootFolder + dc = folder.childEntity[0] + hostFolder = dc.hostFolder + host = hostFolder.childEntity[0].host[0] + + # Make sure vSAN management stack is up on the host + host.configManager.vsanSystem.QueryDisksForVsan() + + # Get vSAN health system from the ESXi Managed Object references. 
+ esxMos = vsanapiutils.GetVsanEsxMos( + si._stub, context=context, version=apiVersion) + print(esxMos) + vs = esxMos['vsanSystem'] + disks = vs.QueryDisksForVsan() + print(disks) + eligibleDisks = \ + [d.disk for d in disks if d.storagePoolDiskState == 'eligible'] + + # Prepare Storage Pool Spec for the creation of vSAN ESA datastore + spec = vim.vsan.host.AddStoragePoolDiskSpec() + spec.host = host + for disk in eligibleDisks: + storagePoolSpec = vim.vsan.host.StoragePoolDisk() + storagePoolSpec.diskName = disk.canonicalName + storagePoolSpec.diskType = vim.vsan.host.StoragePoolDiskType('singleTier') + spec.disks.append(storagePoolSpec) + + # (Optional) Prepare Native Key Provider Spec to be used for the new vSAN + # ESA cluster. This can be replaced with other Key Provider/Management Service + # that's supported by vSAN ESA, or can be skipped if encryption is not + # intended. + nativeKeyProviderSpec = vim.vsan.host.CreateNativeKeyProviderSpec( + provider="NKP_test", + keyDerivationKey='QPHPZc7MTMEQLB7WkRWkGqxCyTTMHvftlz1zX7uqQQ0=', + tpmRequired=False, + keyId = "12345677-abcd-1234-cdef-123456789abc" + ) + + # Send API call to start bootstrapping + vvds = esxMos['vsan-vcsa-deployer-system'] + vSpec = vim.vsan.VsanPrepareVsanForVcsaSpec( + vsanAddStoragePoolDiskSpec = spec, + vsanDataEncryptionConfig = vim.vsan.host.EncryptionInfo( + enabled=True + ), + createNativeKeyProviderSpec = nativeKeyProviderSpec + ) + + # Monitor bootstrapping progress + taskId = vvds.VsanPrepareVsanForVcsa(spec = vSpec) + progress = vvds.VsanVcsaGetBootstrapProgress(taskId = [taskId])[0] + while not progress.success: + if (progress.error is not None): + print("Operation Failed: ") + print(progress.error) + break + print("[%s] Current Progress: %s%% - %s" % (datetime.now().strftime('%H:%M:%S'), progress.progressPct, progress.message)) + time.sleep(5) + progress_t = vvds.VsanVcsaGetBootstrapProgress(taskId = [taskId]) + progress = progress_t[0] + + print('Bootstrapping on ESXi has completed successfully, you can continue to deploy VCSA on the vSAN ESA storage pool of ' \ + 'this host, and continue the rest bootstrapping on VCSA') + + if aboutInfo.apiType == 'VirtualCenter': + + vcMos = vsanapiutils.GetVsanVcMos( + si._stub, context=context, version=apiVersion) + vvds = vcMos['vsan-vcsa-deployer-system'] + + hosts = [] + for host in args.esxIPs: + hosts.append(vim.HostConnectSpec( + force = True, + hostName = host, + userName = args.esxUserName, + password = args.esxPassword + )) + + # NOTE: This MUST be the one used when bootstrapping on the first ESXi + nativeKeyProviderSpec = vim.vsan.host.CreateNativeKeyProviderSpec( + provider="NKP_test", + keyDerivationKey='QPHPZc7MTMEQLB7WkRWkGqxCyTTMHvftlz1zX7uqQQ0=', + tpmRequired=False, + keyId = "12345677-abcd-1234-cdef-123456789abc" + ) + + vSpec = vim.VsanVcPostDeployConfigSpec( + dcName = args.dcName, + clusterName = args.clusterName, + firstHost=hosts[0], + hostsToAdd=hosts[1:], + vsanDataEncryptionConfig = vim.vsan.host.EncryptionInfo( + enabled=True, + ), + createNativeKeyProviderSpec = nativeKeyProviderSpec + ) + + taskId = vvds.VsanPostConfigForVcsa(spec=vSpec) + progress = vvds.VsanVcsaGetBootstrapProgress(taskId = [taskId])[0] + while not progress.success: + if (progress.error is not None): + print("Operation Failed: ") + print(progress.error) + break + print("[%s] Current Progress: %s%% - %s" % (datetime.now().strftime('%H:%M:%S'), progress.progressPct, progress.message)) + time.sleep(5) + progress_t = vvds.VsanVcsaGetBootstrapProgress(taskId = [taskId]) + 
progress = progress_t[0] + + print('Bootstrapping on VCSA has completed successfully') + + +if __name__ == "__main__": + main() diff --git a/vsan-samples/vsandirectsamples.py b/vsan-samples/vsandirectsamples.py new file mode 100644 index 00000000..a1c872d1 --- /dev/null +++ b/vsan-samples/vsandirectsamples.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2020-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +This file includes sample code for the vSAN Direct API. + +To provide an example of vSAN direct API access, it shows how to query eligible +disks, claim vSAN direct storages and query vsan direct storages. + +NOTE: this sample can only be run against vc whose version is equal +to or higher than 7.0 u1. + +usage: vsandirectsamples.py [-h] -s HOST [-o PORT] -u USER [-p PASSWORD] [--cluster CLUSTER] + -h, --help show this help message and exit + -s HOST, --host HOST Remote vCenter host to connect to + -o PORT, --port PORT Port to connect on + -u USER, --user USER User name to use when connecting to host + -p PASSWORD, --password PASSWORD + Password to use when connecting to host + --cluster CLUSTER + +""" + +__author__ = 'Broadcom, Inc' + +import sys +import ssl +import atexit +import argparse +import getpass +import vsanapiutils +from pyVmomi import vim +from pyVim.connect import SmartConnect, Disconnect +from pyVim import task + +import pyVmomi +import vsanmgmtObjects + +if sys.version[0] < '3': + input = raw_input + +def main(): + args = GetArgs() + (si, cluster, vdms) = connectToServers(args) + + if cluster is None: + print("Cluster %s is not found for %s" % (args.clusterName, args.host)) + return -1 + + # Query available disks + hostDisks = {} + hosts = cluster.host + for host in hosts: + disks = host.configManager.vsanSystem.QueryDisksForVsan() + eligibleDisks = [d.disk for d in disks if d.state == 'eligible'] + hostDisks[host.name] = eligibleDisks + outPutEligibleDisks = dict( + [(h, [d.canonicalName for d in hostDisks[h]]) for h in hostDisks]) + print("Eligible disks: %s" % outPutEligibleDisks) + + # Claim vSAN direct storages + for host in hosts: + eligibleDisks = hostDisks.get(host.name) + if eligibleDisks: + spec = vim.vsan.host.DiskMappingCreationSpec() + spec.host = host + spec.capacityDisks = [eligibleDisks[0]] + spec.creationType = "vsandirect" + print("Claiming disks %s for host %s" % \ + (eligibleDisks[0].canonicalName, host.name)) + tsk = vdms.InitializeDiskMappings(spec) + tsk = vim.Task(tsk._moId, si._stub) + if (task.WaitForTask(tsk) != vim.TaskInfo.State.success): + raise Exception("%s diskmapping creation task failed %s" % \ + (spec.creationType, tsk.info)) + print("Succeed in claiming disk for host %s" % host.name) + + # Query vSAN direct storages + result = {} + for host in cluster.host: + ret = vdms.QueryVsanManagedDisks(host) + retDisks = set() + result[host.name] = retDisks + for vsanDirectStorage in ret.vSANDirectDisks: + retDisks.update( + [disk.canonicalName for disk in vsanDirectStorage.scsiDisks]) + print("vSAN direct storages: %s" % result) + +def GetArgs(): + """ + Supports the command-line arguments listed below. 
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote vCenter host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + args = parser.parse_args() + return args + +def connectToServers(args): + """ + Creates connections to the vCenter, vSAN and vSAN disk mangement system + @param args + @return vc service instance, cluster, vSAN disk management system + """ + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has stricter + # connection handshaking rule, hence we are turning off the hostname checking + # and client side cert verification. + sslContext = None + if sys.version_info[:3] > (2,7,8): + sslContext = ssl.create_default_context() + sslContext.check_hostname = False + sslContext.verify_mode = ssl.CERT_NONE + + # Connect to vCenter, get vc service instance + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=sslContext) + atexit.register(Disconnect, si) + + # Get vSAN service instance stub + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + aboutInfo = si.content.about + if aboutInfo.apiType != 'VirtualCenter': + raise Exception("The sample script should be run against vc.") + + vsanStub = vsanapiutils.GetVsanVcMos(si._stub, + context = sslContext, + version = apiVersion) + + # Get vSAN disk management system + vdms = vsanStub['vsan-disk-management-system'] + + # Get cluster + cluster = getClusterInstance(args.clusterName, si) + + return (si, cluster, vdms) + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +if __name__ == "__main__": + main() diff --git a/vsan-samples/vsanesaconfigurationsamples.py b/vsan-samples/vsanesaconfigurationsamples.py new file mode 100644 index 00000000..2e5dd163 --- /dev/null +++ b/vsan-samples/vsanesaconfigurationsamples.py @@ -0,0 +1,126 @@ + +""" +Copyright (c) 2022-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. 
+ +This file includes sample code for vCenter to configure vSAN ESA cluster + + - ReconfigureEx + +The test setup assumes a vSphere cluster with vCenter version 8.0 and above +""" + +__author__ = 'Broadcom, Inc' + +import argparse +import sys, platform +import getpass +import ssl +import atexit +import http.cookies +import pyVim +import pyVmomi +from pyVmomi import vim, vmodl, SoapStubAdapter, VmomiSupport, SessionOrientedStub +from pyVim.connect import SmartConnect, Disconnect +import vsanapiutils +import vsanmgmtObjects +from pyVim.task import WaitForTask + + +def getArgs(): + """ + Supports the command-line arguments listed below. + """ + parser = argparse.ArgumentParser(description='Process args for vSAN ESA configuration samples') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote vCenter to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to vCenter') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to vCenter') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='Vsan2Cluster') + args = parser.parse_args() + return args + + +def GetClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + + +def VpxdStub2HelathStub(stub): + version1 = pyVmomi.VmomiSupport.newestVersions.Get("vsan") + sessionCookie = stub.cookie.split('"')[1] + httpContext = pyVmomi.VmomiSupport.GetHttpContext() + cookieObj = http.cookies.SimpleCookie() + cookieObj["vmware_soap_session"] = sessionCookie + httpContext["cookies"] = cookieObj + hostname = stub.host.split(":")[0] + vhStub = pyVmomi.SoapStubAdapter(host=hostname, version =version1, path = "/vsanHealth", poolSize=0) + vhStub.cookie = stub.cookie + return vhStub + + +def main(): + args = getArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for vCenter %s and ' + 'user %s: ' % (args.host, args.user)) + + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. 
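+    # (Observation about this sample, not a vSAN API requirement: the
+    # /vsanHealth stub built later by VpxdStub2HelathStub() does not pass
+    # this SSL context to its SoapStubAdapter, so on Python builds that
+    # verify certificates by default an sslContext argument may need to be
+    # added there as well.)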
+ context = None + if sys.version_info[:3] > (2, 7, 8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + # Fetch a service instance + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + cluster = GetClusterInstance(args.clusterName, si) + if cluster is None: + print("Cluster {} is not found for {}".format(args.clusterName, args.host)) + return -1 + + # Invoke this API to create HTTP context + vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + + # get vSAN health stub + vhstub = VpxdStub2HelathStub(si._stub) + vcs= vim.cluster.VsanVcClusterConfigSystem('vsan-cluster-config-system', vhstub) + + # Step 1) Get the cluster current configuration + vcs.GetConfigInfoEx(cluster) + print("Is vSAN ESA enabled:",vcs.GetConfigInfoEx(cluster).vsanEsaEnabled) + # Step 2) Enable vSAN ESA on the cluster + rs = vim.vsan.ReconfigSpec(vsanClusterConfig=vim.vsan.cluster.ConfigInfo(enabled=True, vsanEsaEnabled=True)) + tsk = vcs.ReconfigureEx(cluster, rs) + tsk = vim.Task(tsk._moId, cluster._stub) + WaitForTask(tsk) + print(tsk.info) + # Step 3) Get the updated cluster configuration and notice the vSAN ESA flag enabled. + vcs.GetConfigInfoEx(cluster) + print("Is vSAN ESA enabled:",vcs.GetConfigInfoEx(cluster).vsanEsaEnabled) + +if __name__ == "__main__": + main() diff --git a/vsan-samples/vsanesastoragepoolsamples.py b/vsan-samples/vsanesastoragepoolsamples.py new file mode 100644 index 00000000..747557ff --- /dev/null +++ b/vsan-samples/vsanesastoragepoolsamples.py @@ -0,0 +1,339 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2022-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +This file includes sample code for vCenter to call Single tier Storagepool +vSAN ESA APIs: + + - AddStoragePoolDisks + - DeleteStoragePoolDisk + - UnmountStoragePoolDisk + - QueryVsanManagedDisks + +The script assumes the below cluster setup: +Deployed is a vSAN ESA cluster with minimum node requirement. +There are at least 2 eligible unconsumed disks. +""" + +__author__ = 'Broadcom, Inc' + +from pyVim.connect import SmartConnect, Disconnect +import sys +import ssl +import atexit +import argparse +import getpass +import http.cookies + +import vsanapiutils +import pyVmomi +# Import the vSAN API python bindings and utilities from pyVmomi. +import vsanmgmtObjects +from pyVmomi import vim, vmodl, SoapStubAdapter, VmomiSupport, SessionOrientedStub +from pyVim import task +DECOMISSION_MODE = 'noAction' + +def GetArgs(): + """ + Supports the command-line arguments listed below. 
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote vCenter to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to vCenter') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to vCenter') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='Vsan2Cluster') + args = parser.parse_args() + return args + + +def GetClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + + +""" +Demonstrates AddStoragePoolDisks API +Add disks to Storage Pool +If the task of disk addition fails, any exception will be logged. + +Args: + cluster (vim.ClusterComputeResource): vSAN cluster instance. + vdms: vsan-disk-management-system MO instance. + spec (vim.vsan.host.AddStoragePoolDiskSpec): Specifies the data evacuation mode. + +Returns: + None. +""" + + +def addDiskToStoragePool(cluster, vdms, spec): + try: + tsk = vdms.AddStoragePoolDisks([spec]) + addDiskTask = vim.Task(tsk._moId, cluster._stub) + task.WaitForTask(addDiskTask) + print("AddDisk to storage pool operation completed") + except Exception as e: + print("AddDisk to storage pool operation failed: %s" % e) + + +""" +Demonstrates QueryDisksForVsan API +Query all vSAN disks + +Args: + host (vim.HostSystem): host reference. + +Returns: + list of all vSAN disks +""" + + +def queryVsanDisks(host): + return host.configManager.vsanSystem.QueryDisksForVsan() + +""" +Support method helps filter eligible vSAN disks. +Query all vSAN disks + +Args: + host (vim.HostSystem): host reference. + +Returns: + list eligible vSAN disks +""" + + +def queryEligibleVsanDisks(host, getCanonicalNames=False): + disks = queryVsanDisks(host) + eligibleDisks = \ + [d.disk for d in disks if d.state == 'eligible' and d.disk.ssd == True] + if getCanonicalNames: + return [d.disk.canonicalName + for d in disks if d.state == 'eligible' and d.disk.ssd == True] + else: + return eligibleDisks + + +""" +Demonstrates QueryVsanManagedDisks API +Query Storage Pool disks +On success the query returns list of vSAN ESA storage pool disks +If the Query fails, any exception will be logged. + +Args: + vdms: vsan-disk-management-system MO instance. + host: Specifies the host whose disks are to be queried. + Returns: + disks: List of storage pool disks. +""" + + +def queryStoragePoolDisks(vdms, host): + spec = vim.vsan.host.QueryVsanDisksSpec() + spec.vsanDiskType = vim.vsan.host.VsanDiskType("storagePool") + storagePoolDisks = vdms.QueryVsanManagedDisks(host, spec) + disks = [disk for storagePool in storagePoolDisks.storagePools for disk + in storagePool.storagePoolDisks] + return disks + + +""" +Demonstrates unmountDiskFromStoragePool API +Unmount disks from storage pool +If the task of disk unmount fails, any exception will be logged. +Args: + cluster (vim.ClusterComputeResource): vSAN cluster instance. + vdms: vsan-disk-management-system MO instance. 
+ spec (vim.vsan.host.DeleteStoragePoolDiskSpec): Specifies the disk to be unmounted. + +Returns: +none +""" + + +def unmountDiskFromStoragePool(cluster, vdms, spec): + try: + tsk = vdms.UnmountStoragePoolDisks(cluster, spec) + unmountDiskTask = vim.Task(tsk._moId, cluster._stub) + task.WaitForTask(unmountDiskTask) + print("Unmount disk from storage pool operation completed") + except Exception as e: + print("unmount disk from storage pool operation failed: %s" % e) + + +""" +Demonstrates DeleteStoragePoolDisk API +Removes disks from storage pool +If the task of disk remove fails, any exception will be logged. + +Args: + cluster (vim.ClusterComputeResource): vSAN cluster instance. + vdms: vsan-disk-management-system MO instance. + spec (vim.vsan.host.DeleteStoragePoolDiskSpec): Specifies the disk to be removed. + +Returns: +none +""" + + +def removeDiskFromStoragePool(cluster, vdms, spec): + try: + tsk = vdms.DeleteStoragePoolDisk(cluster, spec) + removeDiskTask = vim.Task(tsk._moId, cluster._stub) + task.WaitForTask(removeDiskTask) + print("remove disk from storage pool operation completed") + except Exception as e: + print("remove disk from storage pool operation failed: %s" % e) + + +def VpxdStub2HelathStub(stub): + version1 = pyVmomi.VmomiSupport.newestVersions.Get("vsan") + sessionCookie = stub.cookie.split('"')[1] + httpContext = pyVmomi.VmomiSupport.GetHttpContext() + cookieObj = http.cookies.SimpleCookie() + cookieObj["vmware_soap_session"] = sessionCookie + httpContext["cookies"] = cookieObj + hostname = stub.host.split(":")[0] + vhStub = pyVmomi.SoapStubAdapter(host=hostname, version =version1, path = "/vsanHealth", poolSize=0) + vhStub.cookie = stub.cookie + return vhStub + + +# Calls VC APIs related to single tier storage +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for vCenter %s and ' + 'user %s: ' % (args.host, args.user)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2, 7, 8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + # Fetch a service instance + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + + cluster = GetClusterInstance(args.clusterName, si) + if cluster is None: + print("Cluster {} is not found for {}".format(args.clusterName, args.host)) + return -1 + + hosts = cluster.host + if len(hosts) < 2: + print("The cluster has not enough host in there. Please add 2 hosts " + "and try again.") + return -1 + + # Get vSAN disk management system + # from the vCenter Managed Object references. 
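+    # (GetVsanVcMos() returns a dict of vCenter-side vSAN managed objects;
+    # the 'vsan-disk-management-system' entry drives the storage pool
+    # operations below. 'vsan-cluster-config-system' could be fetched from
+    # the same dict instead of building a /vsanHealth stub by hand.)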
+    vcMos = vsanapiutils.GetVsanVcMos(
+        si._stub, context=context, version=apiVersion)
+    vdms = vcMos['vsan-disk-management-system']
+    vhstub = VpxdStub2HelathStub(si._stub)
+    vcs = vim.cluster.VsanVcClusterConfigSystem('vsan-cluster-config-system',
+                                                vhstub)
+
+    # Check whether vSAN ESA is configured
+    if not vcs.GetConfigInfoEx(cluster).vsanEsaEnabled:
+        print("vSAN ESA is not enabled on cluster {}".format(args.clusterName))
+        return -1
+
+    # Choose the host of your choice
+    firstHost = hosts[1]
+
+    # Step 1) Query vSAN disks and filter out the eligible disks
+    # for the given host. Select the disk of your choice
+    # and invoke the add disks to storage pool API.
+    # Expectation:
+    #     This operation will be successful.
+    # Reason:
+    #     The disk selected to be added is an eligible disk.
+    spec = vim.vsan.host.AddStoragePoolDiskSpec()
+    spec.host = firstHost
+    eligibleVsanDisks = queryEligibleVsanDisks(spec.host)
+    disk = eligibleVsanDisks.pop()
+    storagePoolSpec = vim.vsan.host.StoragePoolDisk()
+    storagePoolSpec.diskName = disk.canonicalName
+    storagePoolSpec.diskType = vim.vsan.host.StoragePoolDiskType('singleTier')
+    spec.disks.append(storagePoolSpec)
+
+    print("Eligible vSAN disks: %s"
+          % [d.canonicalName for d in eligibleVsanDisks])
+    addDiskToStoragePool(cluster, vdms, spec)
+
+    # Step 2) Query storage pool disks and invoke the remove disk from
+    # storage pool API with no action on decommissioning.
+    #
+    # Expectation:
+    #     This operation will be successful.
+    storagePoolDisks = queryStoragePoolDisks(vdms, firstHost)
+    print("Storage pool disks: ",
+          [d.disk.canonicalName for d in storagePoolDisks])
+    spec = vim.vsan.host.DeleteStoragePoolDiskSpec()
+    mspec = vim.host.MaintenanceSpec(
+        vsanMode=vim.vsan.host.DecommissionMode(objectAction=DECOMISSION_MODE))
+    spec.diskUuids = [storagePoolDisks[0].disk.vsanDiskInfo.vsanUuid]
+    spec.maintenanceSpec = mspec
+    removeDiskFromStoragePool(cluster, vdms, spec)
+
+
+    # Step 3) Add the other disk to the pool,
+    # query storage pool disks and invoke the unmount disk
+    # from storage pool API with no action on decommissioning,
+    # and then invoke the remove disk from datastore API.
+    #
+    # Expectation:
+    #     This operation will be successful.
+    # Reason:
+    #     The disk can be removed even when it is unmounted from the
+    #     storage pool.
+    spec = vim.vsan.host.AddStoragePoolDiskSpec()
+    spec.host = firstHost
+    disk = eligibleVsanDisks.pop()
+    storagePoolSpec = vim.vsan.host.StoragePoolDisk()
+    storagePoolSpec.diskName = disk.canonicalName
+    storagePoolSpec.diskType = vim.vsan.host.StoragePoolDiskType('singleTier')
+    spec.disks.append(storagePoolSpec)
+    addDiskToStoragePool(cluster, vdms, spec)
+
+    storagePoolDisks = queryStoragePoolDisks(vdms, firstHost)
+    spec = vim.vsan.host.DeleteStoragePoolDiskSpec()
+    mspec = vim.host.MaintenanceSpec(
+        vsanMode=vim.vsan.host.DecommissionMode(objectAction=DECOMISSION_MODE))
+    spec.diskUuids = [storagePoolDisks[0].disk.vsanDiskInfo.vsanUuid]
+    spec.maintenanceSpec = mspec
+    unmountDiskFromStoragePool(cluster, vdms, spec)
+
+    removeDiskFromStoragePool(cluster, vdms, spec)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/vsan-samples/vsanfssamples.py b/vsan-samples/vsanfssamples.py
new file mode 100644
index 00000000..cf2af904
--- /dev/null
+++ b/vsan-samples/vsanfssamples.py
@@ -0,0 +1,294 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Copyright (c) 2019-2024 Broadcom. All Rights Reserved.
+The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ +This file includes sample codes for vCenter side vSAN file service API +accessing. + +To provide an example of vSAN file service API acccess, it shows how to +download file service OVF, enable file service, create domain, create a +file share, remove a file share, remove domain, together with disable +file service. + +""" + +__author__ = 'Broadcom, Inc' +from pyVim.connect import SmartConnect, Disconnect +from pyVmomi import vim, pbm, VmomiSupport, SoapStubAdapter +import sys +import ssl +import atexit +import argparse + +#import the vSAN API python bindings +import vsanapiutils + +# users can customize the parameters according to your own environment +DOMAIN_NAME = "VSANFS-PA.PRV" +IP_FQDN_DIC = {"192.168.111.2": "h192-168-111-2.example.com", + "192.168.111.3": "h192-168-111-3.example.com", + "192.168.111.4": "h192-168-111-4.example.com", + "192.168.111.5": "h192-168-111-5.example.com"} +SUBNET_MASK = "255.255.255.0" +GATEWAY_ADDRESS = "192.168.111.1" +DNS_SUFFIXES = ["example.com"] +DNS_ADDRESS = ["1.2.3.4"] + +def GetArgs(): + """ + Supports the command-line arguments listed below. + """ + parser = argparse.ArgumentParser( + description='Process args for vSAN file service sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=True, action='store', + help='Password to use when connecting to host') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + args = parser.parse_args() + return args + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +def getFileServiceDomainConfig(): + networkProfiles = [] + for ipAddress, fqdn in IP_FQDN_DIC.items(): + networkProfile = vim.vsan.FileServiceIpConfig( + dhcp=False, ipAddress=ipAddress, subnetMask=SUBNET_MASK, + gateway=GATEWAY_ADDRESS, fqdn=fqdn) + networkProfiles.append(networkProfile) + networkProfiles[0].isPrimary = True + + fileServiceDomainConfig = vim.vsan.FileServiceDomainConfig( + name = DOMAIN_NAME, + dnsServerAddresses = DNS_ADDRESS, + dnsSuffixes = DNS_SUFFIXES, + fileServerIpConfig = networkProfiles) + + return fileServiceDomainConfig + +def connectToSpbm(stub, context): + hostname = stub.host.split(":")[0] + sessionCookie = stub.cookie.split('"')[1] + VmomiSupport.GetRequestContext()["vcSessionCookie"] = sessionCookie + + pbmStub = SoapStubAdapter( + host=hostname, + path = "/pbm/sdk", + version = "pbm.version.version2", + sslContext=context, + ) + pbmStub.cookie = stub.cookie + pbmSi = pbm.ServiceInstance("ServiceInstance", pbmStub) + return pbmSi + +def getVsanStoragePolicy(pbmSi): + resourceType = pbm.profile.ResourceType( + resourceType=pbm.profile.ResourceTypeEnum.STORAGE + ) + + profileManager = pbmSi.RetrieveContent().profileManager + profileIds = profileManager.PbmQueryProfile(resourceType) + profiles = profileManager.PbmRetrieveContent(profileIds) + for profile in profiles: + # vSAN default storage profile possesses a unique profile ID of + # 
'aa6d5a82-1c88-45da-85d3-3d74b91a5bad' across different releases. + profileId = profile.profileId.uniqueId + if (isinstance(profile, pbm.profile.CapabilityBasedProfile) and + profileId == 'aa6d5a82-1c88-45da-85d3-3d74b91a5bad'): + return vim.VirtualMachineDefinedProfileSpec(profileId=profileId) + return None + +def getFileShareConfig(stub, context, domainName): + shareName, shareQuota = 'TestShare-1', '10G' + pbmSi = connectToSpbm(stub, context) + vsanStoragePolicy = getVsanStoragePolicy(pbmSi) + if vsanStoragePolicy is None: + print("Cannot find the vSAN Storage Policy from VC server") + return None + + netPermissions = vim.vsan.FileShareNetPermission( + ips='*', + permissions=vim.vsan.FileShareAccessType.READ_WRITE, + allowRoot=True) + sharePermissions = [netPermissions] + fileShareConfig = vim.vsan.FileShareConfig( + name=shareName, + domainName=domainName, + quota=shareQuota, + storagePolicy=vsanStoragePolicy, + permission=sharePermissions) + return fileShareConfig + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for VC %s and ' + 'user %s: ' % (args.host, args.user)) + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + # Connect to vCenter, get vc service instance + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + atexit.register(Disconnect, si) + + # Detecting whether the host is vCenter or ESXi. + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + + if aboutInfo.apiType != 'VirtualCenter': + print("The vSAN file service APIs are only available on vCenter") + return -1 + + cluster = getClusterInstance(args.clusterName, si) + if cluster is None: + print("Cluster %s is not found for %s" % (args.clusterName, args.host)) + return -1 + + vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context, + version=apiVersion) + vcfs = vcMos['vsan-cluster-file-service-system'] + vccs = vcMos['vsan-cluster-config-system'] + + # Find OVF download url + print("Finding OVF download url ...") + ovfUrl = vcfs.FindOvfDownloadUrl(cluster) + if not ovfUrl: + print("Failed to find the OVF download url.") + return -1 + print("Found OVF download url: %s" % ovfUrl) + + # Download FSVM OVF files to vCenter + print("Downloading ovf files from %s to vCenter ..." 
% ovfUrl)
+    vsanTask = vcfs.DownloadFileServiceOvf(downloadUrl=ovfUrl)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print("Failed to download ovf files with error: %s"
+              % vcTask.info.error)
+        return -1
+    print("Downloaded ovf files to vCenter successfully")
+
+    # Enable file service
+    print("Enabling the file service")
+    network = cluster.host[0].network[0]
+    fileServiceConfig = vim.vsan.FileServiceConfig(
+        enabled=True,
+        network=network,
+        domains=[],
+    )
+    clusterSpec = vim.vsan.ReconfigSpec(fileServiceConfig=fileServiceConfig)
+    vsanTask = vccs.ReconfigureEx(cluster, clusterSpec)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print("Failed to enable file service with error: %s"
+              % vcTask.info.error)
+        return -1
+    print("Enabled file service successfully")
+
+    # Create file service domain
+    fsDomainConfig = getFileServiceDomainConfig()
+    domainName = fsDomainConfig.name
+    print("Creating file service domain")
+    vsanTask = vcfs.CreateFileServiceDomain(fsDomainConfig, cluster)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print("Failed to create file service domain with error: %s"
+              % vcTask.info.error)
+        return -1
+    print("Created file service domain %s successfully"
+          % domainName)
+
+    # Create a file share
+    fileShareConfig = getFileShareConfig(si._stub, context, domainName)
+    if not fileShareConfig:
+        print("Failed to get file share config")
+        return -1
+
+    fileShareName = fileShareConfig.name
+    print("Creating a file share: %s" % fileShareName)
+    vsanTask = vcfs.CreateFileShare(fileShareConfig, cluster)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print("Failed to create a file share with error: %s"
+              % vcTask.info.error)
+        return -1
+    print("Created file share %s successfully" % fileShareName)
+
+    # Remove a file share
+    print("Removing file share: %s" % fileShareName)
+    fileShareQuerySpec = vim.vsan.FileShareQuerySpec()
+    fileShareQuerySpec.domainName = domainName
+    fileShareQuerySpec.names = [fileShareName]
+    queryResult = vcfs.QueryFileShares(fileShareQuerySpec, cluster)
+    result = queryResult.fileShares
+    vsanTask = vcfs.RemoveFileShare(result[0].uuid, cluster)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print("Failed to remove a file share with error: %s"
+              % vcTask.info.error)
+        return -1
+    print("Removed file share %s successfully"
+          % result[0].config.name)
+
+    # Remove file service domain
+    fsDomainQuerySpec = vim.vsan.FileServiceDomainQuerySpec()
+    result = vcfs.QueryFileServiceDomains(fsDomainQuerySpec, cluster)
+    print("Removing file service domain: %s" % result[0].config.name)
+    vsanTask = vcfs.RemoveFileServiceDomain(result[0].uuid, cluster)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print("Failed to remove file service domain with error: %s"
+              % vcTask.info.error)
+        return -1
+    print("Removed file service domain %s successfully"
+          % result[0].config.name)
+
+    # Disable file service
+    print("Disabling file service")
+    fileServiceConfig = vim.vsan.FileServiceConfig(enabled=False)
+    clusterSpec = vim.vsan.ReconfigSpec(fileServiceConfig=fileServiceConfig)
+    vsanTask = vccs.ReconfigureEx(cluster, clusterSpec)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    if vcTask.info.state != 'success':
+        print("Failed to disable file service with error: %s"
+              % vcTask.info.error)
+        return -1
+    print("Disabled file service successfully")
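+
+# Usage sketch (hedged: the host name, credentials and cluster name below
+# are placeholders, and the module-level DOMAIN_NAME/IP_FQDN_DIC constants
+# above must match your environment before running):
+#   python vsanfssamples.py -s vcenter.example.com \
+#       -u administrator@vsphere.local -p 'secret' --cluster VSAN-Cluster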
+
+if __name__ == "__main__":
+    main()
diff --git a/vsan-samples/vsanhealththresholdcustomizesample.py b/vsan-samples/vsanhealththresholdcustomizesample.py
new file mode 100644
index 00000000..61df9ba8
--- /dev/null
+++ b/vsan-samples/vsanhealththresholdcustomizesample.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Copyright (c) 2020-2024 Broadcom. All Rights Reserved.
+The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+
+This file includes sample code for customizing the thresholds of vSAN
+health checks.
+
+NOTE: this sample can only work on VC with version >= 7.0 U2.
+
+usage: vsanhealththresholdcustomizesample.py [-h] -s VC [-o PORT] -u USER
+                                             [-p PASSWORD] [--cluster CLUSTER]
+  -h, --help            show this help message and exit
+  -s VC, --vc VC        Remote vCenter server to connect to
+  -o PORT, --port PORT  Port to connect on
+  -u USER, --user USER  User name to use when connecting to VC
+  -p PASSWORD, --password PASSWORD
+                        Password to use when connecting to VC
+  --cluster CLUSTER
+
+"""
+
+__author__ = 'Broadcom, Inc'
+
+import sys
+import ssl
+import atexit
+import argparse
+import getpass
+import vsanapiutils
+import time
+from pyVmomi import vim
+from pyVim.connect import SmartConnect, Disconnect
+from pyVim import task
+
+
+def main():
+    args = GetArgs()
+    (si, cluster, ccs, chs) = connectToServers(args)
+
+    if cluster is None:
+        print("Cluster %s is not found for %s" % (args.clusterName, args.vc))
+        return -1
+
+    # Print the current customized thresholds
+    cluster_config = ccs.GetConfigInfoEx(cluster)
+    print("Current customized thresholds value is:")
+    print(cluster_config.vsanHealthConfig.healthCheckThresholdSpec)
+
+    # Set customized thresholds for the vSAN datastore, vSAN Direct datastore
+    # and vSAN managed PMem datastore
+    vsanReconfigSpec = vim.vsan.ReconfigSpec(
+        vsanHealthConfig = vim.vsan.VsanHealthConfigSpec(
+            healthCheckThresholdSpec = [
+                vim.vsan.VsanHealthThreshold(
+                    yellowValue=44,
+                    redValue=55,
+                    enabled=True,
+                    target=
+                     vim.vsan.VsanHealthThresholdTarget.diskspace_vsan_datastore
+                ),
+                vim.vsan.VsanHealthThreshold(
+                    yellowValue=66,
+                    redValue=77,
+                    enabled=True,
+                    target=vim.vsan.VsanHealthThresholdTarget.diskspace_vsan_direct
+                ),
+                vim.vsan.VsanHealthThreshold(
+                    yellowValue=88,
+                    redValue=99,
+                    enabled=True,
+                    target=vim.vsan.VsanHealthThresholdTarget.diskspace_vsan_pmem
+                ),
+            ]
+        )
+    )
+    ccs.ReconfigureEx(cluster, vsanReconfigSpec)
+    # Print the customized thresholds via the vSAN VC config system
+    print("Sleep for 10 seconds to wait for the customized thresholds to "
+          "take effect")
+    time.sleep(10)
+    cluster_config = ccs.GetConfigInfoEx(cluster)
+    print("Now the customized thresholds have been changed to:")
+    print(cluster_config.vsanHealthConfig.healthCheckThresholdSpec)
+
+def GetArgs():
+    """
+    Supports the command-line arguments listed below.
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--vc', required=True, action='store', + help='Remote vCenter Server to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to Server') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to Server') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + args = parser.parse_args() + return args + +def connectToServers(args): + """ + Creates connections to the vCenter, vSAN and vSAN disk mangement system + @param args + @return vc service instance, cluster, vSAN disk management system + """ + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for vc %s and ' + 'user %s: ' % (args.vc, args.user)) + + # For python 2.7.9 and later, the default SSL context has stricter + # connection handshaking rule, hence we are turning off the hostname checking + # and client side cert verification. + sslContext = None + if sys.version_info[:3] > (2,7,8): + sslContext = ssl.create_default_context() + sslContext.check_hostname = False + sslContext.verify_mode = ssl.CERT_NONE + + # Connect to vCenter, get vc service instance + si = SmartConnect(host=args.vc, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=sslContext) + atexit.register(Disconnect, si) + + # Get vSAN service instance stub + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.vc, int(args.port)) + aboutInfo = si.content.about + if aboutInfo.apiType != 'VirtualCenter': + raise Exception("The sample script should be run against vc.") + + vsanStub = vsanapiutils.GetVsanVcMos(si._stub, + context = sslContext, + version = apiVersion) + + # Get vSAN cluster config system and vsan cluster health system + ccs = vsanStub['vsan-cluster-config-system'] + chs = vsanStub['vsan-cluster-health-system'] + + # Get cluster + cluster = getClusterInstance(args.clusterName, si) + + return (si, cluster, ccs, chs) + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +if __name__ == "__main__": + main() diff --git a/vsan-samples/vsaniscsisamples.py b/vsan-samples/vsaniscsisamples.py new file mode 100644 index 00000000..f77487c0 --- /dev/null +++ b/vsan-samples/vsaniscsisamples.py @@ -0,0 +1,216 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2016-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +This file includes sample codes for VC and ESXi sides vSAN iSCSI API accessing. + +To provide an example of vSAN iSCSI API access, it shows how to enable vSAN +iSCSI service, create targets and LUNs, together with disable iSCSI service. + +NOTE: using vSAN iSCSI target service API requires a minimal +vim.version.version11 Stub. 
+
+"""
+
+__author__ = 'Broadcom, Inc'
+
+from pyVim.connect import SmartConnect, Disconnect
+from pyVmomi import pbm, VmomiSupport, SoapStubAdapter, vim
+import sys
+import ssl
+import atexit
+import argparse
+import getpass
+from distutils.version import StrictVersion
+
+# Import the vSAN API python bindings
+import pyVmomi
+import vsanmgmtObjects
+import vsanapiutils
+
+def GetArgs():
+    """
+    Supports the command-line arguments listed below.
+    """
+    parser = argparse.ArgumentParser(
+        description='Process args for vSAN iSCSI SDK sample application')
+    parser.add_argument('-s', '--host', required=True, action='store',
+                        help='Remote host to connect to')
+    parser.add_argument('-o', '--port', type=int, default=443, action='store',
+                        help='Port to connect on')
+    parser.add_argument('-u', '--user', required=True, action='store',
+                        help='User name to use when connecting to host')
+    parser.add_argument('-p', '--password', required=False, action='store',
+                        help='Password to use when connecting to host')
+    parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER",
+                        default='VSAN-Cluster')
+    args = parser.parse_args()
+    return args
+
+def getClusterInstance(clusterName, serviceInstance):
+    content = serviceInstance.RetrieveContent()
+    searchIndex = content.searchIndex
+    datacenters = content.rootFolder.childEntity
+    for datacenter in datacenters:
+        cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName)
+        if cluster is not None:
+            return cluster
+    return None
+
+def connectToSpbm(stub, context):
+    sessionCookie = stub.cookie.split('"')[1]
+    VmomiSupport.GetRequestContext()["vcSessionCookie"] = sessionCookie
+    pbmStub = vsanapiutils._GetVsanStub(stub, endpoint="/pbm/sdk",
+                                        context=context,
+                                        version="pbm.version.version2")
+    pbmStub.cookie = stub.cookie
+    pbmSi = pbm.ServiceInstance("ServiceInstance", pbmStub)
+    return pbmSi
+
+def getVsanStoragePolicy(pbmSi):
+    resourceType = pbm.profile.ResourceType(
+        resourceType=pbm.profile.ResourceTypeEnum.STORAGE
+    )
+
+    profileManager = pbmSi.RetrieveContent().profileManager
+    profileIds = profileManager.PbmQueryProfile(resourceType)
+    profiles = profileManager.PbmRetrieveContent(profileIds)
+    for profile in profiles:
+        # The vSAN default storage profile possesses a unique profile ID of
+        # 'aa6d5a82-1c88-45da-85d3-3d74b91a5bad' across different releases.
+        # Other profiles may also be looked up when needed to apply to vSAN
+        # iSCSI services.
+        profileId = profile.profileId.uniqueId
+        if (isinstance(profile, pbm.profile.CapabilityBasedProfile) and
+                profileId == 'aa6d5a82-1c88-45da-85d3-3d74b91a5bad'):
+            return vim.VirtualMachineDefinedProfileSpec(profileId=profileId)
+    return None
+
+def main():
+    args = GetArgs()
+    if args.password:
+        password = args.password
+    else:
+        password = getpass.getpass(prompt='Enter password for host %s and '
+                                   'user %s: ' % (args.host, args.user))
+
+    # For python 2.7.9 and later, the default SSL context has more strict
+    # connection handshaking rules. We may need to turn off the hostname
+    # checking and client side cert verification.
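+    # (The same context is reused below for the SPBM connection made by
+    # connectToSpbm(), so both endpoints skip verification consistently.)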
+ context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + + cluster = getClusterInstance(args.clusterName, si) + if cluster is None: + print("Cluster %s is not found for %s" % (args.clusterName, args.host)) + return -1 + + vcMos = vsanapiutils.GetVsanVcMos(si._stub, context=context, + version=apiVersion) + vits = vcMos['vsan-cluster-iscsi-target-system'] + vccs = vcMos['vsan-cluster-config-system'] + + # Fetch the storage policy ID for enable vSAN iSCSI service and + # create the iSCSI home object. + pbmSi = connectToSpbm(si._stub, context) + vsanStoragePolicy = getVsanStoragePolicy(pbmSi) + if vsanStoragePolicy is None: + print('Cannot find the vSAN Storage Policy from the Virtual ' + + 'Center server.') + return -1 + + # Enable iSCSI service through vSAN Cluster Reconfiguration API on VC, and + # the config port defaults to 3260 and can be customized. + defaultVsanConfigSpec = vim.cluster.VsanIscsiTargetServiceDefaultConfigSpec( + networkInterface="vmk0", + port=2300) + vitEnableSpec = vim.cluster.VsanIscsiTargetServiceSpec( + homeObjectStoragePolicy=vsanStoragePolicy, + defaultConfig=defaultVsanConfigSpec, + enabled=True) + + clusterReconfigSpec = vim.vsan.ReconfigSpec(iscsiSpec=vitEnableSpec, + modify=True) + vitEnableVsanTask = vccs.ReconfigureEx(cluster, clusterReconfigSpec) + vitEnableVcTask = vsanapiutils.ConvertVsanTaskToVcTask( + vitEnableVsanTask, si._stub) + vsanapiutils.WaitForTasks([vitEnableVcTask], si) + print('Enable vSAN iSCSI service task finished with status: %s' % + vitEnableVcTask.info.state) + + # Create vSAN iSCSI targets and an associated LUN with the size of 1GB. + targetAlias = "sampleTarget" + targetSpec = vim.cluster.VsanIscsiTargetSpec( + alias=targetAlias, + iqn='iqn.2015-08.com.vmware:vit.target1') + vsanTask = vits.AddIscsiTarget(cluster, targetSpec) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub) + vsanapiutils.WaitForTasks([vcTask], si) + print('Create vSAN iSCSI target task finished with status: %s' % + vcTask.info.state) + + lunSize = 1 * 1024 * 1024 * 1024 # 1GB + lunSpec = vim.cluster.VsanIscsiLUNSpec( + lunId=0, + lunSize=lunSize, + storagePolicy=vsanStoragePolicy) + vsanTask = vits.AddIscsiLUN(cluster, targetAlias, lunSpec) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub) + vsanapiutils.WaitForTasks([vcTask], si) + print('Create vSAN iSCSI LUN task finished with status: %s' % + vcTask.info.state) + + targetList = vits.GetIscsiTargets(cluster) + print('Get vSAN iSCSI Targets: %s' % targetList) + + target = vits.GetIscsiTarget(cluster, targetAlias) + print('Get vSAN iSCSI Target: %s' % target) + + lunList = vits.GetIscsiLUNs(cluster, targetAlias) + print('Get vSAN iSCSI LUNs: %s' % lunList) + + lun = vits.GetIscsiLUN(cluster, targetAlias, 0) + print('Get vSAN iSCSI LUN: %s' % lun) + + # Remove vSAN iSCSI targets and LUN associated with the targets. 
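+    # (A target can only be removed once it has no LUNs left. A hedged
+    # sketch for draining every LUN of a target first, reusing the helpers
+    # above; lun.lunId is assumed to be the LUN's numeric ID:
+    #     for lun in vits.GetIscsiLUNs(cluster, targetAlias):
+    #         t = vsanapiutils.ConvertVsanTaskToVcTask(
+    #             vits.RemoveIscsiLUN(cluster, targetAlias, lun.lunId),
+    #             si._stub)
+    #         vsanapiutils.WaitForTasks([t], si))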
+ vsanTask = vits.RemoveIscsiLUN(cluster, targetAlias, 0) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub) + vsanapiutils.WaitForTasks([vcTask], si) + print("Remove vSAN iSCSI LUN task finished with status:%s" % + vcTask.info.state) + + vsanTask = vits.RemoveIscsiTarget(cluster, targetAlias) + vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub) + vsanapiutils.WaitForTasks([vcTask], si) + print("Remove vSAN iSCSI target task finished with status:%s" % + vcTask.info.state) + + # Disable iSCSI service through vSAN iSCSI API on vCenter. + vitDisableSpec = vim.cluster.VsanIscsiTargetServiceSpec(enabled=False) + clusterReconfigSpec = vim.vsan.ReconfigSpec(iscsiSpec=vitDisableSpec, + modify=True) + vitDisableVsanTask = vccs.ReconfigureEx(cluster, clusterReconfigSpec) + vitDisableVcTask = vsanapiutils.ConvertVsanTaskToVcTask( + vitDisableVsanTask, si._stub) + vsanapiutils.WaitForTasks([vitDisableVcTask], si) + print('Disable vSAN iSCSI service task finished with status: %s' % + vitDisableVcTask.info.state) + +if __name__ == "__main__": + main() diff --git a/vsan-samples/vsanresyncetaimprovementsamples.py b/vsan-samples/vsanresyncetaimprovementsamples.py new file mode 100644 index 00000000..f3c6955e --- /dev/null +++ b/vsan-samples/vsanresyncetaimprovementsamples.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2016-2024 Broadcom. All Rights Reserved. +The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. + +This file includes sample codes for vCenter vSAN resyncetaimprovement +API accessing. + +To provide an example of vCenter side vSAN API access, it shows how to get resyc +query summary by invoking the QuerySyncingVsanObjectsSummary() API of the +VsanVcObjectSystemImpl MO. + +""" + +__author__ = 'Broadcom, Inc' + +from pyVim.connect import SmartConnect, Disconnect +import sys +import ssl +import atexit +import argparse +import getpass +if sys.version[0] < '3': + input = raw_input + +# Import the vSAN API python bindings and utilities. +import pyVmomi +import vsanmgmtObjects +import vsanapiutils +from pyVmomi import vim, vmodl + +def GetArgs(): + """ + Supports the command-line arguments listed below. 
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + args = parser.parse_args() + return args + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +def displayResyncSummary(res): + print('totalObjectsToSync = %s' % res.totalObjectsToSync) + print('totalBytesToSync = %s' % res.totalBytesToSync) + print('totalRecoveryETA = %s' % res.totalRecoveryETA) + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the host is vCenter or ESXi. + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + if aboutInfo.apiType == 'VirtualCenter': + majorApiVersion = aboutInfo.apiVersion.split('.')[0] + if int(majorApiVersion) < 6: + print('The Virtual Center with version %s (lower than 6.0) is not ' + 'supported.' % aboutInfo.apiVersion) + return -1 + + # Get vSAN reync query summary from the vCenter Managed Object references. + vcMos = vsanapiutils.GetVsanVcMos( + si._stub, context=context, version=apiVersion) + vhs = vcMos['vsan-cluster-object-system'] + cluster = getClusterInstance(args.clusterName, si) + + if cluster is None: + print("Cluster %s is not found for %s" % (args.clusterName, args.host)) + return -1 + + of=vim.cluster.VsanSyncingObjectFilter() + # Setting filter parameters for active objects. + of.resyncStatus = 'active' + of.resyncType = None + of.offset = 0 + of.numberOfObjects = 100 + + # Fetching resync summary from connected host in cluster. + res=vhs.QuerySyncingVsanObjectsSummary(cluster,of) + print('\nResync summary of active objects having any resync reason:') + displayResyncSummary(res) + + # Setting filter parameters for active objects with + # specific resync reason. + of.resyncStatus = 'active' + of.resyncType = 'evacuate' + of.offset = 0 + of.numberOfObjects = 100 + + # Fetching resync summary from connected host in cluster. 
+    res = vhs.QuerySyncingVsanObjectsSummary(cluster, of)
+    print('\nResync summary of active objects having resync reason '
+          'as evacuate:')
+    displayResyncSummary(res)
+
+    # Set filter parameters for queued objects.
+    of.resyncStatus = 'queued'
+    of.resyncType = None
+    of.offset = 0
+    of.numberOfObjects = 100
+
+    # Fetch the resync summary from the connected host in the cluster.
+    res = vhs.QuerySyncingVsanObjectsSummary(cluster, of)
+    print('\nResync summary of queued objects having any resync reason:')
+    displayResyncSummary(res)
+
+    # Set filter parameters for queued objects with a specific resync reason.
+    of.resyncStatus = 'queued'
+    of.resyncType = 'repair'
+    of.offset = 0
+    of.numberOfObjects = 100
+
+    # Fetch the resync summary from the connected host in the cluster.
+    res = vhs.QuerySyncingVsanObjectsSummary(cluster, of)
+    print('\nResync summary of queued objects having resync reason '
+          'as repair:')
+    displayResyncSummary(res)
+
+if __name__ == "__main__":
+    main()
diff --git a/vsan-samples/vsansharedwitnesssample.py b/vsan-samples/vsansharedwitnesssample.py
new file mode 100644
index 00000000..ea8346f2
--- /dev/null
+++ b/vsan-samples/vsansharedwitnesssample.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Copyright (c) 2020-2024 Broadcom. All Rights Reserved.
+The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+
+This file includes sample code for accessing the vCenter side vSAN
+shared witness API.
+
+To provide an example of vCenter side vSAN shared witness API access,
+it shows how to configure a shared witness in the following scenarios:
+1. Replacing the witnesses of multiple robo clusters with one shared
+   witness in batch:
+   Requirements: one shared witness and one or more robo clusters.
+   API: ReplaceWitnessHostForClusters of the VsanVcStretchedClusterSystem MO.
+2. Converting one or more regular two-node vSAN clusters to robo clusters
+   sharing the same witness in batch:
+   Requirements: one shared witness and one or more regular two-node
+   vSAN clusters.
+   API: AddWitnessHostForClusters of the VsanVcStretchedClusterSystem MO.
+"""
+
+__author__ = 'Broadcom, Inc'
+
+from pyVim.connect import SmartConnect, Disconnect
+from pyVmomi import vim
+import sys
+import ssl
+import atexit
+import argparse
+import getpass
+from distutils.version import LooseVersion
+
+# Import the vSAN API python bindings and utilities.
+import pyVmomi
+import vsanmgmtObjects
+import vsanapiutils
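+
+# The two scenarios above are distinguished purely by how many witness
+# hosts a cluster already has: zero for a regular vSAN cluster, one for a
+# robo cluster. The helper below is an illustrative sketch (the name
+# isRoboCluster is an assumption, not part of the original sample); it
+# only reuses the GetWitnessHosts() call that the batch functions below
+# rely on for the same check.
+def isRoboCluster(vscs, cluster):
+    """Return True if the cluster already has exactly one witness host."""
+    return len(vscs.GetWitnessHosts(cluster)) == 1
+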
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote VirtualCenter to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-w', '--witness', required=True, action='store', + help='Remote witness node to connect to') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--roboclusters', dest='roboClusters', action='store', + help='Cluster name list of candidate vSAN robo clusters,' + ' format: "cluster_1, cluster_2, ..."') + parser.add_argument('--normalclusters', dest='normalClusters', action='store', + help='Cluster name list of candidate regular two-node vSAN' + ' clusters, format: "cluster_1, cluster_2, ..."') + args = parser.parse_args() + return args + + +def getComputeInstance(entityName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + instance = searchIndex.FindChild(datacenter.hostFolder, entityName) + if instance is not None: + return instance + return None + + +def getClusterInstances(clusterNames, serviceInstance): + clusters = [] + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + dc = None + for clusterName in clusterNames: + cluster = getComputeInstance(clusterName, serviceInstance) + if not cluster: + msg = 'ERROR: Cluster %s is not found for %s' % clusterName + sys.exit(msg) + clusters.append(cluster) + return clusters + +def checkCompatibility(si, vscs, clusterRefs, witness): + compatCheckResult = \ + vscs.QuerySharedWitnessCompatibility(witness, clusterRefs) + if not compatCheckResult.witnessHostCompatibility.compatible: + msg = "ERROR: target host %s doesn't have shared witness capability: %s" \ + % (witness.name, + compatCheckResult.witnessHostCompatibility.incompatibleReasons) + sys.exit(msg) + for clusterCompResult in compatCheckResult.roboClusterCompatibility: + if not clusterCompResult.compatible: + clusterMo = vim.ClusterComputeResource(clusterCompResult.entity._moId, + si._stub) + msg = "ERROR: cluster %s could not meet shared witness capability" \ + " requirement: %s" % \ + (clusterMo.name, clusterCompResult.incompatibleReasons) + sys.exit(msg) + +def convertToRoboClusterInBatch(si, vscs, witness, clusterRefs): + """ Convert multiple two-node vsan clusters to robo clusters + that share the same witness in batch. + + Requirements: + 1. The candidate cluster must be a two-node cluster with vsan enable. + 2. There is no network isolation between witness and the multiple + clusters given. 
+ """ + checkCompatibility(si, vscs, clusterRefs, witness) + for cluster in clusterRefs: + if len(vscs.GetWitnessHosts(cluster)) != 0: + msg = "ERROR: cluster %s is not a regular vSAN cluster" % cluster.name + sys.exit(msg) + print("Converting normal vSAN clusters(2 nodes) '%s' to robo clusters" \ + " with shared witness %s" % \ + ([cluster.name for cluster in clusterRefs], witness.name)) + spec = vim.vsan.VsanVcStretchedClusterConfigSpec( + witnessHost = witness, + clusters = [vim.cluster.VsanStretchedClusterConfig( + cluster = cluster, + preferredFdName= 'fd1', + faultDomainConfig= + vim.cluster.VSANStretchedClusterFaultDomainConfig( + firstFdName = 'fd1', + firstFdHosts = [cluster.host[0]], + secondFdName = 'fd2', + secondFdHosts = [cluster.host[1]], + ), + ) for cluster in clusterRefs] + ) + addWitnessTask = vscs.AddWitnessHostForClusters(spec) + vsanapiutils.WaitForTasks([addWitnessTask], si) + +def replaceWitnessInBatch(si, vscs, witness, clusterRefs): + """ Replace witness with the same Shared witness in batches for + for multiple robo clusters in one operation. + + Requirements: + 1. The candidate cluster must be vSAN robo cluster. + 2. There is no network isolation between witness and the multiple + clusters given. + """ + checkCompatibility(si, vscs, clusterRefs, witness) + for cluster in clusterRefs: + if len(vscs.GetWitnessHosts(cluster)) != 1: + msg = "ERROR: cluster %s is not a robo cluster" % cluster.name + sys.exit(msg) + print("Replacing the old witness(es) with shared witness %s" \ + " for clusters: %s" % (witness.name, + [cluster.name for cluster in clusterRefs])) + spec = vim.vsan.VsanVcStretchedClusterConfigSpec( + witnessHost = witness, + clusters = [vim.cluster.VsanStretchedClusterConfig( + cluster = cluster + ) for cluster in clusterRefs] + ) + replaceWitnessTask = vscs.ReplaceWitnessHostForClusters(spec) + vsanapiutils.WaitForTasks([replaceWitnessTask], si) + +def removeWitnessForClusters(si, vscs, witness, clusterRefs): + totalTasks = [] + for cluster in clusterRefs: + print("Removing witness %s from cluster %s" % \ + (witness.name, cluster.name)) + removeTask = vscs.RemoveWitnessHost(cluster, witness) + totalTasks.append(vsanapiutils.ConvertVsanTaskToVcTask( + removeTask, si._stub)) + vsanapiutils.WaitForTasks(totalTasks, si) + +def getWitnessClusters(si, vscs, witness): + clusterNames = [] + getWitnessClustrs = vscs.QueryWitnessHostClusterInfo(witness) + for cluster in getWitnessClustrs: + clusterMo = vim.ClusterComputeResource(cluster.cluster._moId, si._stub) + clusterNames.append(clusterMo.name) + return clusterNames + +class LogWitnessStatus(object): + def __init__(self, si, vscs, witness): + self.si = si + self.vscs = vscs + self.witness = witness + + def __enter__(self): + print("Before Ops: shared witness %s has joined the following clusters:" + " %s" % (self.witness.name, + getWitnessClusters(self.si, self.vscs, self.witness))) + + def __exit__(self, *a): + print("After Ops: Now shared witness %s has joined the following" + " clusters: %s" % (self.witness.name, + getWitnessClusters(self.si, self.vscs, self.witness))) + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. 
+def main():
+    args = GetArgs()
+    if args.password:
+        password = args.password
+    else:
+        password = getpass.getpass(prompt='Enter password for host %s and '
+                                   'user %s: ' % (args.host, args.user))
+    # For python 2.7.9 and later, the default SSL context has stricter
+    # connection handshaking rules. We may need to turn off the hostname
+    # checking and the client side certificate verification.
+    context = None
+    if sys.version_info[:3] > (2, 7, 8):
+        context = ssl.create_default_context()
+        context.check_hostname = False
+        context.verify_mode = ssl.CERT_NONE
+
+    si = SmartConnect(host=args.host,
+                      user=args.user,
+                      pwd=password,
+                      port=int(args.port),
+                      sslContext=context)
+    atexit.register(Disconnect, si)
+
+    # Detect whether the host is vCenter or ESXi.
+    aboutInfo = si.content.about
+    apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port))
+
+    if aboutInfo.apiType == 'VirtualCenter':
+        majorApiVersion = aboutInfo.apiVersion
+        if LooseVersion(majorApiVersion) < LooseVersion('7.0.1'):
+            msg = "The Virtual Center with version %s (lower than 7.0U1) is" \
+                  " not supported." % aboutInfo.apiVersion
+            sys.exit(msg)
+        # Get the vSAN stretched cluster system from the vCenter Managed
+        # Object references.
+        vcMos = vsanapiutils.GetVsanVcMos(
+            si._stub, context=context, version=apiVersion)
+        vscs = vcMos['vsan-stretched-cluster-system']
+
+        witness = getComputeInstance(args.witness, si)
+        if not witness:
+            msg = 'Given witness host %s is not found in %s' % \
+                  (args.witness, args.host)
+            sys.exit(msg)
+        witness = witness.host[0]
+        allClusters = []
+        if args.roboClusters:
+            roboClusters = [clusterName.strip() for clusterName
+                            in args.roboClusters.split(',')]
+            roboClusters = getClusterInstances(roboClusters, si)
+            allClusters.extend(roboClusters)
+            with LogWitnessStatus(si, vscs, witness):
+                replaceWitnessInBatch(si, vscs, witness, roboClusters)
+
+        if args.normalClusters:
+            twoNodesClusters = [clusterName.strip() for clusterName
+                                in args.normalClusters.split(',')]
+            twoNodesClusters = getClusterInstances(twoNodesClusters, si)
+            allClusters.extend(twoNodesClusters)
+            with LogWitnessStatus(si, vscs, witness):
+                convertToRoboClusterInBatch(si, vscs, witness,
+                                            twoNodesClusters)
+
+        with LogWitnessStatus(si, vscs, witness):
+            removeWitnessForClusters(si, vscs, witness, allClusters)
+    else:
+        print('Remote host should be a Virtual Center.')
+        return -1
+
+if __name__ == "__main__":
+    main()
diff --git a/vsan-samples/vsanvumsamples.py b/vsan-samples/vsanvumsamples.py
new file mode 100644
index 00000000..a03cb522
--- /dev/null
+++ b/vsan-samples/vsanvumsamples.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Copyright (c) 2019-2024 Broadcom. All Rights Reserved.
+Broadcom Confidential. The term "Broadcom" refers to Broadcom Inc.
+
+This file includes sample code for accessing the vCenter side vSAN VUM API.
+
+To provide an example of vCenter side vSAN VUM baseline preference API
+access, it shows how to set the baseline preference on a given cluster by
+invoking the ReconfigureEx() API of the VsanVcClusterConfigSystem MO.
+
+"""
+
+__author__ = 'Broadcom, Inc'
+
+from pyVim.connect import SmartConnect, Disconnect
+from pyVmomi import vim
+import sys
+import ssl
+import atexit
+import argparse
+import getpass
+from distutils.version import LooseVersion
+
+if sys.version[0] < '3':
+    input = raw_input
+
+# Import the vSAN API python bindings and utilities.
+import pyVmomi
+import vsanmgmtObjects
+import vsanapiutils
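+
+# Changing the baseline preference is a one-field cluster reconfiguration.
+# The helper below is an illustrative sketch (the name
+# setBaselinePreference is an assumption, not part of the original
+# sample); it packages the same ReconfigureEx() call that main() below
+# issues inline. "latestPatch" is the value this sample sets; if memory
+# serves, "latestRelease" is the other commonly used preference value,
+# but check the vSAN VUM documentation for the full set.
+def setBaselinePreference(si, vccs, cluster, preferenceType="latestPatch"):
+    """Reconfigure the cluster's VUM baseline preference and wait."""
+    spec = vim.vsan.ReconfigSpec(
+        vumConfig=vim.vsan.VsanVumConfig(
+            baselinePreferenceType=preferenceType))
+    vsanTask = vccs.ReconfigureEx(cluster, spec)
+    vcTask = vsanapiutils.ConvertVsanTaskToVcTask(vsanTask, si._stub)
+    vsanapiutils.WaitForTasks([vcTask], si)
+    return vcTask.info.state
+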
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + args = parser.parse_args() + return args + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the host is vCenter or ESXi. + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + + if aboutInfo.apiType == 'VirtualCenter': + majorApiVersion = aboutInfo.apiVersion + if LooseVersion(majorApiVersion) < LooseVersion('6.7.1'): + print('The Virtual Center with version %s (lower than 6.7U3) is not ' + 'supported.' % aboutInfo.apiVersion) + return -1 + + # Get vSAN health system from the vCenter Managed Object references. 
+        vcMos = vsanapiutils.GetVsanVcMos(
+            si._stub, context=context, version=apiVersion)
+        vccs = vcMos['vsan-cluster-config-system']
+
+        cluster = getClusterInstance(args.clusterName, si)
+
+        if cluster is None:
+            print('Cluster %s is not found for %s' % (args.clusterName,
+                                                      args.host))
+            return -1
+
+        clusterReconfigSpec = vim.vsan.ReconfigSpec(
+            vumConfig=vim.vsan.VsanVumConfig(
+                baselinePreferenceType="latestPatch"
+            )
+        )
+
+        vumConfigTask = vccs.ReconfigureEx(cluster, clusterReconfigSpec)
+        vumConfigVcTask = vsanapiutils.ConvertVsanTaskToVcTask(
+            vumConfigTask, si._stub)
+        vsanapiutils.WaitForTasks([vumConfigVcTask], si)
+
+        print('Set vSAN VUM baseline preference to latestPatch finished with '
+              'status: %s' % vumConfigVcTask.info.state)
+
+        preferenceType = \
+            vccs.GetConfigInfoEx(cluster).vumConfig.baselinePreferenceType
+        print('Verify vSAN VUM baseline preference value: %s' % preferenceType)
+    else:
+        print('Remote host should be a Virtual Center.')
+        return -1
+
+if __name__ == "__main__":
+    main()
diff --git a/vsan-samples/vsanwhatifdecom30samples.py b/vsan-samples/vsanwhatifdecom30samples.py
new file mode 100644
index 00000000..ded98979
--- /dev/null
+++ b/vsan-samples/vsanwhatifdecom30samples.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+Copyright (c) 2019-2024 Broadcom. All Rights Reserved.
+Broadcom Confidential. The term "Broadcom" refers to Broadcom Inc.
+
+This file includes sample code for vCenter to call the two vSAN resource
+check APIs for EMM (Enter Maintenance Mode): "PerformResourceCheck" and
+"GetResourceCheckStatus".
+
+To provide an example of vCenter vSAN API access, it calls
+"PerformResourceCheck" with the "ensureObjectAccessibility" option first
+and then calls "GetResourceCheckStatus" to determine the what-if Decom 3.0
+status on the cluster.
+
+"""
+
+__author__ = 'Broadcom, Inc'
+
+from pyVim.connect import SmartConnect, Disconnect
+import sys
+import ssl
+import atexit
+import argparse
+import getpass
+if sys.version[0] < '3':
+    input = raw_input
+
+# Import the vSAN API python bindings and utilities.
+import pyVmomi
+import vsanmgmtObjects
+import vsanapiutils
+
+from pyVmomi import vim, vmodl, SoapStubAdapter, VmomiSupport
+from pyVim import task
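+
+# The EMM what-if flow is always the same three calls: build a
+# ResourceCheckSpec, run PerformResourceCheck(), then read the result with
+# GetResourceCheckStatus(). The helper below is an illustrative sketch
+# (the name checkEmmResourceStatus is an assumption, not part of the
+# original sample); objectAction may be any decommission mode these
+# samples demonstrate, e.g. "ensureObjectAccessibility",
+# "evacuateAllData" or "noAction".
+def checkEmmResourceStatus(vscrcs, cluster, hostUuid,
+                           objectAction="ensureObjectAccessibility"):
+    """Run an EMM resource check for one host and return the result."""
+    mSpec = vim.host.MaintenanceSpec(
+        vsanMode=vim.vsan.host.DecommissionMode(objectAction=objectAction))
+    spec = vim.vsan.ResourceCheckSpec(operation="EnterMaintenanceMode",
+                                      entities=[hostUuid],
+                                      maintenanceSpec=mSpec)
+    tsk = vim.Task(vscrcs.PerformResourceCheck(spec, cluster)._moId,
+                   cluster._stub)
+    task.WaitForTask(tsk)
+    return vscrcs.GetResourceCheckStatus(spec, cluster)
+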
+ """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + args = parser.parse_args() + return args + +def getClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the host is vCenter or ESXi. + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + + if aboutInfo.apiType != 'VirtualCenter': + print("Host %s is not a VC host. Please run this script on a VC host.", + args.host) + return + else: + majorApiVersion = aboutInfo.apiVersion.split('.')[0] + if int(majorApiVersion) < 6: + print('The Virtual Center with version %s ( <6.0) is not supported.' + % aboutInfo.apiVersion) + return -1 + + cluster = getClusterInstance(args.clusterName, si) + if cluster is None: + print("Cluster %s is not found for %s", args.clusterName, args.host) + return -1 + + hosts=cluster.host + if len(hosts) < 1: + print("The cluster has no host in there. Please add atleast 1 host" + + " and try again.") + return -1 + + firstHost=hosts[0] + + # Get vSAN health system from the vCenter Managed Object references. + vcMos = vsanapiutils.GetVsanVcMos( + si._stub, context=context, version=apiVersion) + vscrcs = vcMos['vsan-cluster-resource-check-system'] + + mSpec = vim.host.MaintenanceSpec( + vsanMode = vim.vsan.host.DecommissionMode( + objectAction = "ensureObjectAccessibility")) + hostUuid = firstHost.configManager.vsanSystem.config.clusterInfo.nodeUuid + spec = vim.vsan.ResourceCheckSpec(operation="EnterMaintenanceMode", + entities=[hostUuid], + maintenanceSpec=mSpec) + tsk = vscrcs.PerformResourceCheck(spec, cluster) + tsk = vim.Task(tsk._moId, cluster._stub) + task.WaitForTask(tsk) + resRes = vscrcs.GetResourceCheckStatus(spec, cluster) + + # Both resource check compelted and result is green. + if (resRes.status.lower() == "resourcecheckcompleted") and \ + (resRes.result.status.lower() == "green"): + print("EMM will proceed successfully.") + else: + print("EMM will NOT be successful. 
%s and it is %s" + %(resRes.status, resRes.result.status)) + +if __name__ == "__main__": + main() diff --git a/vsan-samples/whatifDecom3DiskAndDiskGroupSamples.py b/vsan-samples/whatifDecom3DiskAndDiskGroupSamples.py new file mode 100644 index 00000000..3f84e6c9 --- /dev/null +++ b/vsan-samples/whatifDecom3DiskAndDiskGroupSamples.py @@ -0,0 +1,436 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Copyright (c) 2020-2024 Broadcom. All Rights Reserved. +Broadcom Confidential. The term "Broadcom" refers to Broadcom Inc. + +This file includes sample code for vCenter to call WhatIfDecomDiskAndDiskGroup +vSAN APIs: + + - PerformResourceCheck + - GetResourceCheckStatus + - RemoveDiskEx + - RemoveDiskMappingEx + - RebuildDiskMapping + - UnmountDiskMappingEx + +Cluster setup required for this sample API: +- 4 node cluster with vsan enabled +- Create 1 diskGroup with 1 cache and 2 capacity disks for first 3 nodes +(use cluster instance to find first 3 hosts) +- No diskGroup on fourth host at the beginning but it has 3 +spare disks to form a diskGroup (1 cache + 2 capacity) later. +- Deploy 1 VM on the cluster +- VM Policy is FTT = 1 +""" + +__author__ = 'Broadcom, Inc' + +from pyVim.connect import SmartConnect, Disconnect +import time +import sys +import ssl +import atexit +import argparse +import getpass +if sys.version[0] < '3': + input = raw_input + +# Import the vSAN API python bindings and utilities. +import pyVmomi +import vsanmgmtObjects +import vsanapiutils + +from pyVmomi import vim, vmodl, SoapStubAdapter, VmomiSupport +from pyVim import task + + +def GetArgs(): + """ + Supports the command-line arguments listed below. + """ + parser = argparse.ArgumentParser( + description='Process args for vSAN SDK sample application') + parser.add_argument('-s', '--host', required=True, action='store', + help='Remote host to connect to') + parser.add_argument('-o', '--port', type=int, default=443, action='store', + help='Port to connect on') + parser.add_argument('-u', '--user', required=True, action='store', + help='User name to use when connecting to host') + parser.add_argument('-p', '--password', required=False, action='store', + help='Password to use when connecting to host') + parser.add_argument('--cluster', dest='clusterName', metavar="CLUSTER", + default='VSAN-Cluster') + args = parser.parse_args() + return args + +def GetClusterInstance(clusterName, serviceInstance): + content = serviceInstance.RetrieveContent() + searchIndex = content.searchIndex + datacenters = content.rootFolder.childEntity + for datacenter in datacenters: + cluster = searchIndex.FindChild(datacenter.hostFolder, clusterName) + if cluster is not None: + return cluster + return None + +""" +Run what-if resource check for a capacity disk or a diskGroup +with the given resource check spec. +Args: + cluster (vim.ClusterComputeResource): vSAN cluster which owns + the disk or diskGroup. + vscrcs: "vsan-cluster-resource-check-system" MO instance. + spec (vim.vsan.ResourceCheckSpec): The resource check spec. +Returns: + 0: resource check result is pass or unknown. + The main workflow can still proceed. + -1: resource check result indicates there is not enough resource + in the cluster for related operations so the main workflow + should be aborted. 
+""" +def RunResourceCheck(cluster, vscrcs, spec): + try: + tsk = vscrcs.PerformResourceCheck(spec, cluster) + resourceCheckTask = vim.Task(tsk._moId, cluster._stub) + task.WaitForTask(resourceCheckTask) + resRes = vscrcs.GetResourceCheckStatus(spec, cluster) + except Exception as e: + print("Runtime error during resource check: %s" % e) + + # Usually the main workflow should be aborted only when the + # resource check task finishes successfully and the result shows 'red' + # status which clearly indicates there is not enough resource + # in the cluster for related operations. + # + # If the resource check task failed to complete somehow, + # we may still want to give it a try for the actual operation + # (e.g. remove a disk-group) and continue the main workflow + # (i.e. a more aggressive way with unknown resource check result). + if resRes is None: + return 0 + + if (resRes.status == vim.vsan.ResourceCheckStatusType( + "resourceCheckCompleted") and + resRes.result is not None and + resRes.result.status == "red"): + print("Disk data evacuation resource check " + "completed but the result is red.") + return -1 + else: + print("Disk data evacuation resource check: %s" % + resRes.status) + return 0 + +""" +Remove a single capacity disk from a diskGroup. +If the task of disk removal fails, any exception will be logged. + +Args: + cluster (vim.ClusterComputeResource): vSAN cluster instance. + vdms: vsan-disk-management-system MO instance. + diskToBeRemoved (vim.host.ScsiDisk): capacity disk to be removed. + mSpec (vim.host.MaintenanceSpec): Specifies the data evacuation mode. + +Returns: + None. +""" +def InvokeRemoveDiskExApi(cluster, vdms, diskToBeRemoved, mSpec): + try: + tsk = vdms.RemoveDiskEx(cluster, [diskToBeRemoved], mSpec) + removeDiskTask = vim.Task(tsk._moId, cluster._stub) + print("Starting remove disk task.") + task.WaitForTask(removeDiskTask) + except Exception as e: + print("RemoveDisk operation failed: %s" % e) + +""" +Remove vSAN disk mapping(s) from use in a vSAN cluster with the +specified data evacuation mode. If the task of diskgroup +removal fails, any exception will be logged. + +Args: + cluster (vim.ClusterComputeResource): vSAN cluster instance. + vdms: vsan-disk-management-system MO instance. + disksMaps(vim.vsan.host.DiskMapping[]): list of disk mapping(s) + to be removed. + mSpec (vim.host.MaintenanceSpec): Specifies the data evacuation mode. + +Returns: + None. +""" +def InvokeRemoveDiskMappingApi(cluster, vdms, disksMaps, mSpec): + try: + tsk = vdms.RemoveDiskMappingEx(cluster, disksMaps, mSpec) + removeDiskMappingTask = vim.Task(tsk._moId, cluster._stub) + task.WaitForTask(removeDiskMappingTask) + except Exception as e: + print("RemoveDiskMapping operation failed: %s" % e) + +""" +Unmount vSAN disk mapping(s) in a vSAN cluster with the specified +data evacuation mode. If the task of unmount disk fails, +any exception will be logged. + +Args: + cluster (vim.ClusterComputeResource): vSAN cluster instance. + vdms: vsan-disk-management-system MO instance. + disksMaps (vim.vsan.host.DiskMapping[]): list of disk mapping(s) + to be unmounted. + mSpec (vim.host.MaintenanceSpec): Specifies the data evacuation mode. + +Returns: + None. 
+""" +def InvokeUnmountDiskMappingExApi(cluster, vdms, disksMaps, mSpec): + try: + tsk = vdms.UnmountDiskMappingEx(cluster, disksMaps, mSpec) + unmountDiskMappingTask = vim.Task(tsk._moId, cluster._stub) + task.WaitForTask(unmountDiskMappingTask) + except Exception as e: + print("UnmountDiskMapping operation failed: %s" % e) + +""" +Rebuild an existing vSAN disk mapping on the specified host. +If the task of rebuilt disk mapping fails, any exception +will be logged. + +Args: + cluster (vim.ClusterComputeResource): vSAN cluster instance. + host: Target host which owns the diskGroup to rebuild. + vdms: vsan-disk-management-system MO instance. + disksMap (vim.vsan.host.DiskMapping): disk mapping to be rebuilt + from vSAN usage. + mSpec (vim.host.MaintenanceSpec): Specifies the data evacuation mode. + +Returns: + None. +""" +def InvokeRebuildDiskMappingApi(cluster, host, vdms, disksMap, mSpec): + try: + tsk = vdms.RebuildDiskMapping(host, disksMap, mSpec) + rebuildDiskMappingTask = vim.Task(tsk._moId, cluster._stub) + task.WaitForTask(rebuildDiskMappingTask) + except Exception as e: + print("RebuildDiskMapping operation failed: %s" % e) + +""" +Create a new vSAN diskGroup on specified host. If the task of +initializing disk mapping fails, any exception will be logged. + +Args: + cluster (vim.ClusterComputeResource): vSAN cluster instance. + host: Target host which will own created diskGroup. + vdms: vsan-disk-management-system MO instance. + +Returns: + None. +""" +def CreateDiskMapping(cluster, host, vdms): + try: + cacheDisk = None + capacityDisk = [] + disks = host.configManager.vsanSystem.QueryDisksForVsan() + + for result in disks: + if (result.state.strip() == "eligible"): + if result.disk.ssd: + cacheDisk = result.disk + else: + capacityDisk.append(result.disk) + + if cacheDisk is not None and len(capacityDisk) > 0: + spec = vim.vsan.host.DiskMappingCreationSpec( + cacheDisks = [cacheDisk], + capacityDisks = capacityDisk, + creationType = "allFlash", + host = host + ) + + tsk = vdms.InitializeDiskMappings(spec) + diskMapCreationTask = vim.Task(tsk._moId, cluster._stub) + task.WaitForTask(diskMapCreationTask) + except Exception as e: + print("Diskmapping creation task failed: %s" % e) + + +def main(): + args = GetArgs() + if args.password: + password = args.password + else: + password = getpass.getpass(prompt='Enter password for host %s and ' + 'user %s: ' % (args.host,args.user)) + + # For python 2.7.9 and later, the default SSL context has more strict + # connection handshaking rule. We may need turn off the hostname checking + # and client side cert verification. + context = None + if sys.version_info[:3] > (2,7,8): + context = ssl.create_default_context() + context.check_hostname = False + context.verify_mode = ssl.CERT_NONE + + si = SmartConnect(host=args.host, + user=args.user, + pwd=password, + port=int(args.port), + sslContext=context) + + atexit.register(Disconnect, si) + + # Detecting whether the host is vCenter or ESXi. + aboutInfo = si.content.about + apiVersion = vsanapiutils.GetLatestVmodlVersion(args.host, int(args.port)) + + if aboutInfo.apiType != 'VirtualCenter': + print("Host %s is not a VC host. Please run this script on a VC host.", + args.host) + return + else: + majorApiVersion = aboutInfo.apiVersion.split('.')[0] + if int(majorApiVersion) < 6: + print('The Virtual Center with version %s ( <6.0) is not supported.' 
+
+    cluster = GetClusterInstance(args.clusterName, si)
+    if cluster is None:
+        print("Cluster %s is not found for %s" % (args.clusterName,
+                                                  args.host))
+        return -1
+
+    hosts = cluster.host
+    if len(hosts) < 4:
+        print("The cluster does not have enough hosts. Please add 4 hosts "
+              "and try again.")
+        return -1
+
+    # Get the vSAN cluster resource check system and the disk management
+    # system from the vCenter Managed Object references.
+    vcMos = vsanapiutils.GetVsanVcMos(
+        si._stub, context=context, version=apiVersion)
+    vscrcs = vcMos['vsan-cluster-resource-check-system']
+    vdms = vcMos['vsan-disk-management-system']
+
+    firstHost = hosts[0]
+
+    mSpec = vim.host.MaintenanceSpec(
+        vsanMode=vim.vsan.host.DecommissionMode(
+            objectAction="evacuateAllData"))
+    disksMaps = firstHost.configManager.vsanSystem.config.storageInfo.diskMapping
+    diskUuid = disksMaps[0].nonSsd[0].vsanDiskInfo.vsanUuid
+    spec = vim.vsan.ResourceCheckSpec(operation="DiskDataEvacuation",
+                                      entities=[diskUuid],
+                                      maintenanceSpec=mSpec)
+
+    # Step 1) Check the resource precheck results for the evacuateAllData
+    #         decommission mode on a capacity disk of the diskGroup on the
+    #         first host. If the resource precheck passes, the test will
+    #         proceed to remove this capacity disk.
+    # Expectation:
+    #         This operation will be successful.
+    # Reason:
+    #         The host has 2 capacity disks and 1 cache disk in the
+    #         diskGroup, so by removing one capacity disk and moving its
+    #         data to the other capacity disk, FTT = 1 still holds.
+    print("Perform resource precheck for a capacity disk on host %s."
+          % firstHost.name)
+    RunResourceCheck(cluster, vscrcs, spec)
+    print("Removing capacity disk from host %s with evacuateAll mode."
+          % firstHost.name)
+    InvokeRemoveDiskExApi(cluster, vdms, disksMaps[0].nonSsd[0], mSpec)
+
+    # Step 2) Check the resource precheck results on the diskGroup with
+    #         evacuateAllData mode on the first host, which now has 1
+    #         cache and 1 capacity disk. After the resource precheck,
+    #         try removing the same diskGroup on the first host.
+    # Expectation:
+    #         This operation will fail. The resource precheck result will
+    #         be 'red', which indicates the removal of this diskGroup will
+    #         fail. If we continue to remove the diskGroup, it will fail
+    #         as expected.
+    # Reason:
+    #         In the cluster, 3 hosts have 1 diskGroup each and FTT = 1.
+    #         By removing the diskGroup on one host, there is no diskGroup
+    #         left to move the data to. The FTT policy would be violated,
+    #         as there are not enough fault domains left to hold FTT = 1,
+    #         so this operation will fail.
+    diskUuid = disksMaps[0].ssd.vsanDiskInfo.vsanUuid
+    print("Perform resource precheck for disk group on host %s."
+          % firstHost.name)
+    spec = vim.vsan.ResourceCheckSpec(operation="DiskDataEvacuation",
+                                      entities=[diskUuid],
+                                      maintenanceSpec=mSpec)
+    RunResourceCheck(cluster, vscrcs, spec)
+    print("Removing disk mapping on host %s with evacuateAll mode."
+          % firstHost.name)
+    InvokeRemoveDiskMappingApi(cluster, vdms, disksMaps, mSpec)
+
+    # Step 3) Create a diskGroup with 1 cache and 2 capacity disks for the
+    #         fourth host in the cluster.
+    # Expectation:
+    #         This operation will succeed. The fourth host has 1
+    #         cache and 2 capacity disks to create a diskGroup.
+    # Result:
+    #         A diskGroup is created on the fourth host. The cluster will
+    #         have 4 hosts, each having 1 diskGroup.
+    print("Creating disk group on host %s" % hosts[3].name)
+    CreateDiskMapping(cluster, hosts[3], vdms)
+
+    # Step 4) Check the resource precheck results for the diskGroup on the
+    #         first host, which has 1 cache and 1 capacity disk, with
+    #         evacuateAllData mode. After the resource check, try removing
+    #         the same diskGroup on the first host.
+    # Expectation:
+    #         This operation will succeed.
+    # Reason:
+    #         The cluster has 1 diskGroup on each of the 4 hosts and
+    #         FTT = 1. When removing the diskGroup on one host, since we
+    #         added a diskGroup on the fourth host, the data will be moved
+    #         to another diskGroup and FTT = 1 remains valid.
+    disksMaps = firstHost.configManager.vsanSystem.config.storageInfo.diskMapping
+    diskUuid = disksMaps[0].ssd.vsanDiskInfo.vsanUuid
+    print("Perform resource precheck for disk group on host %s."
+          % firstHost.name)
+    spec = vim.vsan.ResourceCheckSpec(operation="DiskDataEvacuation",
+                                      entities=[diskUuid],
+                                      maintenanceSpec=mSpec)
+    RunResourceCheck(cluster, vscrcs, spec)
+    print("Removing disk mapping on host %s with evacuateAll mode."
+          % firstHost.name)
+    InvokeRemoveDiskMappingApi(cluster, vdms, disksMaps, mSpec)
+
+    secondHost = hosts[1]
+
+    # Step 5) Perform RebuildDiskMapping on the second host with
+    #         ensureObjectAccessibility mode.
+    # Expectation:
+    #         This operation will succeed.
+    # Result:
+    #         There are 3 hosts with 1 diskGroup each, and all vSAN
+    #         objects are still accessible during/after the rebuild
+    #         operation.
+    mSpec = vim.host.MaintenanceSpec(
+        vsanMode=vim.vsan.host.DecommissionMode(
+            objectAction="ensureObjectAccessibility"))
+    disksMaps = secondHost.configManager.vsanSystem.config.storageInfo.diskMapping
+    print("Rebuild disk mapping on host %s with ensure accessibility mode."
+          % secondHost.name)
+    InvokeRebuildDiskMappingApi(cluster, secondHost, vdms, disksMaps[0], mSpec)
+
+    # Step 6) Unmount the disk mapping on the second host with noAction
+    #         mode.
+    # Expectation:
+    #         This operation will succeed.
+    # Result:
+    #         As the data evacuation mode is noAction, the operation will
+    #         always succeed.
+    mSpec = vim.host.MaintenanceSpec(
+        vsanMode=vim.vsan.host.DecommissionMode(
+            objectAction="noAction"))
+    disksMaps = secondHost.configManager.vsanSystem.config.storageInfo.diskMapping
+    print("Unmount disk mapping on host %s with no action mode."
+          % secondHost.name)
+    InvokeUnmountDiskMappingExApi(cluster, vdms, disksMaps, mSpec)
+
+    print("Invoking WhatIfDecomDiskAndDiskGroup APIs completed successfully.")
+
+if __name__ == "__main__":
+    main()