Improve eksctl-anywhere cluster config generation with parameters for bare metal and vSphere

A sample command for bare metal:
    eksctl anywhere generate clusterconfig <cluster name> \
        -p tinkerbell \
        -m params_tinkerbell.yaml
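
The generated config is printed to stdout (as before), so it can be redirected into a file:
    eksctl anywhere generate clusterconfig <cluster name> \
        -p tinkerbell \
        -m params_tinkerbell.yaml > <cluster name>.yaml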

A sample params_tinkerbell.yaml:
"""
managementClusterName: <management cluster name>
podsCidrBlocks:
  - 192.168.64.0/18
servicesCidrBlocks:
  - 10.96.0.0/12
kubernetesVersion: 1.26
cpCount: 1
workerCount: 2
cpEndpointHost: <control plane endpoint host ip>
tinkerbellIP: <tinkerbellIP>
adminIP: <admin machine ip>
osFamily: ubuntu
osImageURL: <osImageURL of K8s 1.26>
hardwareCSV: <hardware CSV file>
sshAuthorizedKeyFile: <sshKey.pub file>
tinkerbellTemplateConfigTemplateFile: tinkerbellTemplateConfigTemplateUbuntu.yaml
"""

managementClusterName is optional; it defaults to <cluster name>.
tinkerbellTemplateConfigTemplateFile is for advanced use cases; the default ("") is fine and selects a built-in template for the chosen osFamily (ubuntu or bottlerocket). In a custom template file, the placeholders $$NAME, $$IMG_URL, $$ADMIN_IP and $$TINKERBELL_IP are replaced with the cluster name, osImageURL, adminIP and tinkerbellIP values.
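
The hardware CSV referenced by hardwareCSV only needs the headers the new parsing code requires: mac, ip_address and labels (real inventories typically carry more columns, which the count logic ignores). Rows whose labels equal type=cp count as control plane nodes; all other rows count as workers. A hypothetical minimal hardware.csv:
"""
mac,ip_address,labels
00:00:00:00:00:01,10.0.0.11,type=cp
00:00:00:00:00:02,10.0.0.12,type=worker
00:00:00:00:00:03,10.0.0.13,type=worker
"""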

A sample command for vSphere:
    eksctl anywhere generate clusterconfig <cluster name> \
        -p vsphere \
        -m params_vsphere.yaml

A sample params_vsphere.yaml:
"""
managementClusterName: <management cluster name>
podsCidrBlocks:
  - 192.168.192.0/18
servicesCidrBlocks:
  - 10.96.192.0/18
cpCount: 2
etcdCount: 3
workerCount: 3
cpEndpointHost: <control plane endpoint host ip>
kubernetesVersion: 1.28
datacenter: <vDatacenter>
insecure: true
network: <vCenterNetwork>
server: <serverIP>
thumbprint: <thumbprint>
datastore: <vDatastore>
folder: <folder>
cpDiskGiB: 0
cpMemoryMiB: 0
cpNumCPUs: 0
etcdDiskGiB: 0
etcdMemoryMiB: 0
etcdNumCPUs: 0
workerDiskGiB: 256
workerMemoryMiB: 65536
workerNumCPUs: 16
osFamily: "ubuntu"
resourcePool: <resource pool>
template: <template name of OS>
sshAuthorizedKeyFile: <sshKey.pub>
"""

managementClusterName is optional; it defaults to <cluster name>.
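
Unset or zero fields fall back to defaults (cpCount 2, etcdCount 3, workerCount 2; the control plane endpoint is left as a placeholder when cpEndpointHost is empty), so a pared-down file also works. A hypothetical minimal params_vsphere.yaml:
"""
datacenter: <vDatacenter>
network: <vCenterNetwork>
server: <serverIP>
thumbprint: <thumbprint>
datastore: <vDatastore>
folder: <folder>
resourcePool: <resource pool>
template: <template name of OS>
osFamily: "ubuntu"
sshAuthorizedKeyFile: <sshKey.pub>
"""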
ygao-armada committed Apr 14, 2024
1 parent 797d0c7 commit 51e87d5
Showing 13 changed files with 634 additions and 48 deletions.
262 changes: 240 additions & 22 deletions cmd/eksctl-anywhere/cmd/generateclusterconfig.go
@@ -1,8 +1,10 @@
package cmd

import (
"encoding/csv"
"fmt"
"log"
"os"
"strings"

"github.com/spf13/cobra"
@@ -50,6 +52,7 @@ func preRunGenerateClusterConfig(cmd *cobra.Command, args []string) {
func init() {
generateCmd.AddCommand(generateClusterConfigCmd)
generateClusterConfigCmd.Flags().StringP("provider", "p", "", fmt.Sprintf("Provider to use (%s)", strings.Join(constants.SupportedProviders, " or ")))
generateClusterConfigCmd.Flags().StringP("paramsFile", "m", "", "parameters file (vsphere or tinkerbell)")
err := generateClusterConfigCmd.MarkFlagRequired("provider")
if err != nil {
log.Fatalf("marking flag as required: %v", err)
@@ -61,6 +64,30 @@ func generateClusterConfig(clusterName string) error {
var datacenterYaml []byte
var machineGroupYaml [][]byte
var clusterConfigOpts []v1alpha1.ClusterGenerateOpt
var kubernetesVersion string
var tinkerbellTemplateConfigTemplate string
var podsCidrBlocks []string
var servicesCidrBlocks []string
var paramsData []byte
var err error

// use cluster name as the default management cluster name.
managementClusterName := clusterName

if viper.IsSet("paramsFile") {
switch strings.ToLower(viper.GetString("provider")) {
case constants.VSphereProviderName, constants.TinkerbellProviderName:
paramsFile := viper.GetString("paramsFile")
paramsData, err = os.ReadFile(paramsFile)
if err != nil {
return fmt.Errorf("reading paramsFile: %v", err)
}
default:
return fmt.Errorf("parameter file is only supported for vsphere and tinkerbell")
}
}

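// NOTE: when no paramsFile is given, paramsData stays nil; yaml.Unmarshal on nil input
// leaves the params structs zero-valued, so the provider defaults below still apply.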
switch strings.ToLower(viper.GetString("provider")) {
case constants.DockerProviderName:
datacenterConfig := v1alpha1.NewDockerDatacenterConfigGenerate(clusterName)
@@ -77,25 +104,76 @@ func generateClusterConfig(clusterName string) error {
}
datacenterYaml = dcyaml
case constants.VSphereProviderName:
- clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
- datacenterConfig := v1alpha1.NewVSphereDatacenterConfigGenerate(clusterName)
var vSphereParams v1alpha1.VSphereClusterConfigParams
err = yaml.Unmarshal(paramsData, &vSphereParams)
if err != nil {
return fmt.Errorf("unmarshal vSphereParams: %v", err)
}

if vSphereParams.ManagementClusterName != "" {
// override the management cluster name with that from parameter file.
managementClusterName = vSphereParams.ManagementClusterName
}

// set podsCidrBlocks and servicesCidrBlocks to the values from parameter file.
podsCidrBlocks = vSphereParams.PodsCidrBlocks
servicesCidrBlocks = vSphereParams.ServicesCidrBlocks

if vSphereParams.CPEndpointHost != "" {
// add control plane endpoint config with host from parameter file.
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpointHost(vSphereParams.CPEndpointHost))
} else {
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
}

// create datacenter config with values from parameter file
datacenterConfig := v1alpha1.NewVSphereDatacenterConfigGenerate(clusterName, vSphereParams.Datacenter, vSphereParams.Network, vSphereParams.Server, vSphereParams.Thumbprint, vSphereParams.Insecure)
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithDatacenterRef(datacenterConfig))
// default counts of CP nodes, Etcd nodes and worker nodes.
cpCount := 2
etcdCount := 3
workerCount := 2

if vSphereParams.CPCount != 0 {
// override counts of CP nodes with value from parameter file.
cpCount = vSphereParams.CPCount
}

if vSphereParams.EtcdCount != 0 {
// override counts of Etcd nodes with value from parameter file.
etcdCount = vSphereParams.EtcdCount
}

if vSphereParams.WorkerCount != 0 {
// override counts of Worker nodes with value from parameter file.
workerCount = vSphereParams.WorkerCount
}
clusterConfigOpts = append(clusterConfigOpts,
- v1alpha1.ControlPlaneConfigCount(2),
- v1alpha1.ExternalETCDConfigCount(3),
- v1alpha1.WorkerNodeConfigCount(2),
v1alpha1.ControlPlaneConfigCount(cpCount),
v1alpha1.ExternalETCDConfigCount(etcdCount),
v1alpha1.WorkerNodeConfigCount(workerCount),
v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
)
dcyaml, err := yaml.Marshal(datacenterConfig)
if err != nil {
return fmt.Errorf("generating cluster yaml: %v", err)
}
datacenterYaml = dcyaml
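// read the SSH public key from sshAuthorizedKeyFile, if one was provided,
// so it can be embedded in the generated machine configs.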
var sshAuthorizedKey string
if vSphereParams.SSHAuthorizedKeyFile != "" {
b, err := os.ReadFile(vSphereParams.SSHAuthorizedKeyFile)
if err != nil {
return fmt.Errorf("open sshAuthorizedKeyFile file: %v", err)
}
sshAuthorizedKey = string(b)
}

kubernetesVersion = vSphereParams.KubernetesVersion
// need to default control plane config name to something different from the cluster name based on assumption
// in controller code
- cpMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName))
- workerMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(clusterName)
- etcdMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(providers.GetEtcdNodeName(clusterName))
cpMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName), vSphereParams.Datastore, vSphereParams.Folder, vSphereParams.ResourcePool, vSphereParams.Template, sshAuthorizedKey, vSphereParams.OSFamily, vSphereParams.CPDiskGiB, vSphereParams.CPNumCPUs, vSphereParams.CPMemoryMiB)
workerMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(clusterName, vSphereParams.Datastore, vSphereParams.Folder, vSphereParams.ResourcePool, vSphereParams.Template, sshAuthorizedKey, vSphereParams.OSFamily, vSphereParams.WorkerDiskGiB, vSphereParams.WorkerNumCPUs, vSphereParams.WorkerMemoryMiB)
etcdMachineConfig := v1alpha1.NewVSphereMachineConfigGenerate(providers.GetEtcdNodeName(clusterName), vSphereParams.Datastore, vSphereParams.Folder, vSphereParams.ResourcePool, vSphereParams.Template, sshAuthorizedKey, vSphereParams.OSFamily, vSphereParams.EtcdDiskGiB, vSphereParams.EtcdNumCPUs, vSphereParams.EtcdMemoryMiB)
clusterConfigOpts = append(clusterConfigOpts,
v1alpha1.WithCPMachineGroupRef(cpMachineConfig),
v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig),
@@ -183,35 +261,167 @@ func generateClusterConfig(clusterName string) error {
}
machineGroupYaml = append(machineGroupYaml, cpMcYaml, workerMcYaml, etcdMcYaml)
case constants.TinkerbellProviderName:
- clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
- datacenterConfig := v1alpha1.NewTinkerbellDatacenterConfigGenerate(clusterName)
var tinkerbellParams v1alpha1.TinkerbellClusterConfigParams
err = yaml.Unmarshal(paramsData, &tinkerbellParams)
if err != nil {
return fmt.Errorf("unmarshal tinkerbellParams: %v", err)
}

if tinkerbellParams.ManagementClusterName != "" {
// override the management cluster name with that from parameter file.
managementClusterName = tinkerbellParams.ManagementClusterName
}

// set podsCidrBlocks and servicesCidrBlocks to the values from parameter file.
podsCidrBlocks = tinkerbellParams.PodsCidrBlocks
servicesCidrBlocks = tinkerbellParams.ServicesCidrBlocks

if tinkerbellParams.CPEndpointHost != "" {
// add control plane endpoint config with host from parameter file.
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpointHost(tinkerbellParams.CPEndpointHost))
} else {
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithClusterEndpoint())
}

kubernetesVersion = tinkerbellParams.KubernetesVersion

adminIP := tinkerbellParams.AdminIP
tinkerbellIP := tinkerbellParams.TinkerbellIP
osImageURL := tinkerbellParams.OSImageURL

// create datacenter config with values from parameter file
datacenterConfig := v1alpha1.NewTinkerbellDatacenterConfigGenerate(clusterName, tinkerbellIP, osImageURL)
clusterConfigOpts = append(clusterConfigOpts, v1alpha1.WithDatacenterRef(datacenterConfig))
// default counts of CP nodes, Etcd nodes and worker nodes.
cpCount := 1
workerCount := 1
if tinkerbellParams.HardwareCSV != "" {
// parse hardware.csv file to get counts of CP/worker nodes
f, err := os.Open(tinkerbellParams.HardwareCSV)
if err != nil {
return fmt.Errorf("open hardware file: %v", err)
}
defer f.Close()
csvReader := csv.NewReader(f)
data, err := csvReader.ReadAll()
if err != nil {
return fmt.Errorf("read hardware file: %v", err)
}
macIndex := -1
ipIndex := -1
labelsIndex := -1
cpCount = 0
workerCount = 0
for i, line := range data {
if i == 0 {
// from the header (first line), find the index of
// MAC, IP, labels.
for j, field := range line {
if strings.EqualFold(field, "mac") {
macIndex = j
} else if strings.EqualFold(field, "ip_address") {
ipIndex = j
} else if strings.EqualFold(field, "labels") {
labelsIndex = j
}
}
if macIndex == -1 {
return fmt.Errorf("no mac header found in hardware file")
}
if ipIndex == -1 {
return fmt.Errorf("no ip header found in hardware file")
}
if labelsIndex == -1 {
return fmt.Errorf("no labels header found in hardware file")
}
} else {
// for the remaining lines, increase the counts of CP and worker nodes.
if strings.ToLower(line[labelsIndex]) == "type=cp" {
cpCount = cpCount + 1
} else {
workerCount = workerCount + 1
}
}
}
}

if tinkerbellParams.CPCount != 0 {
// override counts of CP nodes with value from parameter file.
cpCount = tinkerbellParams.CPCount
}

if tinkerbellParams.WorkerCount != 0 {
// override counts of Worker nodes with value from parameter file.
workerCount = tinkerbellParams.WorkerCount
}

clusterConfigOpts = append(clusterConfigOpts,
- v1alpha1.ControlPlaneConfigCount(1),
- v1alpha1.WorkerNodeConfigCount(1),
- v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
v1alpha1.ControlPlaneConfigCount(cpCount),
)
if workerCount > 0 {
// only generate worker cluster when worker count > 0.
clusterConfigOpts = append(clusterConfigOpts,
v1alpha1.WorkerNodeConfigCount(workerCount),
v1alpha1.WorkerNodeConfigName(constants.DefaultWorkerNodeGroupName),
)
}
dcyaml, err := yaml.Marshal(datacenterConfig)
if err != nil {
return fmt.Errorf("generating cluster yaml: %v", err)
}
datacenterYaml = dcyaml

- cpMachineConfig := v1alpha1.NewTinkerbellMachineConfigGenerate(providers.GetControlPlaneNodeName(clusterName))
- workerMachineConfig := v1alpha1.NewTinkerbellMachineConfigGenerate(clusterName)
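// read the SSH public key from sshAuthorizedKeyFile, if one was provided,
// so it can be embedded in the generated machine configs.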
var sshAuthorizedKey string
if tinkerbellParams.SSHAuthorizedKeyFile != "" {
b, err := os.ReadFile(tinkerbellParams.SSHAuthorizedKeyFile)
if err != nil {
return fmt.Errorf("open sshAuthorizedKeyFile file: %v", err)
}
sshAuthorizedKey = string(b)
}

cpMachineConfig := v1alpha1.NewTinkerbellMachineConfigGenerate(clusterName, providers.GetControlPlaneNodeName(clusterName), "cp", sshAuthorizedKey, tinkerbellParams.OSFamily)
clusterConfigOpts = append(clusterConfigOpts,
v1alpha1.WithCPMachineGroupRef(cpMachineConfig),
- v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig),
)
cpMcYaml, err := yaml.Marshal(cpMachineConfig)
if err != nil {
return fmt.Errorf("generating cluster yaml: %v", err)
}
- workerMcYaml, err := yaml.Marshal(workerMachineConfig)
- if err != nil {
- return fmt.Errorf("generating cluster yaml: %v", err)
- }
machineGroupYaml = append(machineGroupYaml, cpMcYaml)

if workerCount > 0 {
workerMachineConfig := v1alpha1.NewTinkerbellMachineConfigGenerate(clusterName, clusterName, "worker", sshAuthorizedKey, tinkerbellParams.OSFamily)
// only generate worker machine group reference when worker count > 0.
clusterConfigOpts = append(clusterConfigOpts,
v1alpha1.WithWorkerMachineGroupRef(workerMachineConfig),
)
// only generate worker machine config YAML when worker count > 0.
workerMcYaml, err := yaml.Marshal(workerMachineConfig)
if err != nil {
return fmt.Errorf("generating cluster yaml: %v", err)
}
machineGroupYaml = append(machineGroupYaml, workerMcYaml)
}

if viper.IsSet("paramsFile") {
if tinkerbellParams.TinkerbellTemplateConfigTemplateFile != "" {
b, err := os.ReadFile(tinkerbellParams.TinkerbellTemplateConfigTemplateFile)
if err != nil {
return fmt.Errorf("open tinkerbellTemplateConfigTemplateFile file: %v", err)
}
tinkerbellTemplateConfigTemplate = string(b)
} else if tinkerbellParams.OSFamily == v1alpha1.Ubuntu {
tinkerbellTemplateConfigTemplate = GetDefaultTinkerbellTemplateConfigTemplateUbuntu()
} else if tinkerbellParams.OSFamily == v1alpha1.Bottlerocket {
tinkerbellTemplateConfigTemplate = GetDefaultTinkerbellTemplateConfigTemplateBottlerocket()
}

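// substitute cluster-specific values for the $$ placeholders in the template.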
tinkerbellTemplateConfigTemplate = strings.Replace(tinkerbellTemplateConfigTemplate, "$$NAME", clusterName, -1)
tinkerbellTemplateConfigTemplate = strings.Replace(tinkerbellTemplateConfigTemplate, "$$IMG_URL", osImageURL, -1)
tinkerbellTemplateConfigTemplate = strings.Replace(tinkerbellTemplateConfigTemplate, "$$ADMIN_IP", adminIP, -1)
tinkerbellTemplateConfigTemplate = strings.Replace(tinkerbellTemplateConfigTemplate, "$$TINKERBELL_IP", tinkerbellIP, -1)
}
- machineGroupYaml = append(machineGroupYaml, cpMcYaml, workerMcYaml)
case constants.NutanixProviderName:
datacenterConfig := v1alpha1.NewNutanixDatacenterConfigGenerate(clusterName)
dcYaml, err := yaml.Marshal(datacenterConfig)
@@ -257,7 +467,8 @@ func generateClusterConfig(clusterName string) error {
default:
return fmt.Errorf("not a valid provider")
}
- config := v1alpha1.NewClusterGenerate(clusterName, clusterConfigOpts...)

config := v1alpha1.NewClusterGenerate(clusterName, managementClusterName, kubernetesVersion, podsCidrBlocks, servicesCidrBlocks, clusterConfigOpts...)

configMarshal, err := yaml.Marshal(config)
if err != nil {
@@ -272,6 +483,13 @@ func generateClusterConfig(clusterName string) error {
resources = append(resources, machineGroupYaml...)
}

- fmt.Println(string(templater.AppendYamlResources(resources...)))
fmt.Print(string(templater.AppendYamlResources(resources...)))

if tinkerbellTemplateConfigTemplate != "" {
fmt.Println(tinkerbellTemplateConfigTemplate)
} else {
fmt.Println("")
}

return nil
}
