feat: add Apache Cloudstack support

Add support for the Apache CloudStack platform.

Signed-off-by: Andrey Smirnov <andrey.smirnov@siderolabs.com>
Signed-off-by: Claus Albøge <ca@netic.dk>
Claus Albøge 2024-08-16 01:54:29 +02:00 committed by Andrey Smirnov
parent 951cf66fdc
commit 75cecb4210
11 changed files with 510 additions and 4 deletions

@ -395,7 +395,7 @@ image-%: ## Builds the specified image. Valid options are aws, azure, digital-oc
images-essential: image-aws image-azure image-gcp image-metal secureboot-installer ## Builds only essential images used in the CI (AWS, GCP, and Metal).
-images: image-akamai image-aws image-azure image-digital-ocean image-exoscale image-gcp image-hcloud image-iso image-metal image-nocloud image-opennebula image-openstack image-oracle image-scaleway image-upcloud image-vmware image-vultr ## Builds all known images (AWS, Azure, DigitalOcean, Exoscale, GCP, HCloud, Metal, NoCloud, OpenNebula, OpenStack, Oracle, Scaleway, UpCloud, Vultr and VMware).
+images: image-akamai image-aws image-azure image-digital-ocean image-exoscale image-cloudstack image-gcp image-hcloud image-iso image-metal image-nocloud image-opennebula image-openstack image-oracle image-scaleway image-upcloud image-vmware image-vultr ## Builds all known images (AWS, Azure, DigitalOcean, Exoscale, Cloudstack, GCP, HCloud, Metal, NoCloud, OpenNebula, OpenStack, Oracle, Scaleway, UpCloud, Vultr and VMware).
.PHONY: iso
iso: image-iso ## Builds the ISO and outputs it to the artifact directory.
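With the `images` list extended, the CloudStack disk image can also be built on its own via the generic `image-%` pattern target referenced in the hunk header above; a minimal sketch:

```bash
# Build only the CloudStack image; the result is written to the artifact
# directory alongside the other platform images.
make image-cloudstack
```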

@ -192,6 +192,12 @@ The Talos bundled Flannel manifest was simplified to remove the `install-cni` st
title = "Device Extra Settle Timeout" title = "Device Extra Settle Timeout"
description = """\ description = """\
Talos Linux now supports a kernel command line argument `talos.device.settle_time=3m` to set the device extra settle timeout to workaround issues with broken drivers. Talos Linux now supports a kernel command line argument `talos.device.settle_time=3m` to set the device extra settle timeout to workaround issues with broken drivers.
"""
[notes.platform]
title = "Platform Support"
description = """\
Talos Linux now supports Apache CloudStack platform.
""" """
[make_deps] [make_deps]

@ -0,0 +1,117 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

// Package cloudstack contains the Cloudstack platform implementation.
package cloudstack

import (
    "context"
    "fmt"
    "log"
    "net/netip"
    "strings"

    "github.com/cosi-project/runtime/pkg/state"
    "github.com/siderolabs/go-procfs/procfs"

    "github.com/siderolabs/talos/internal/app/machined/pkg/runtime"
    "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/errors"
    "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/internal/netutils"
    "github.com/siderolabs/talos/pkg/download"
    "github.com/siderolabs/talos/pkg/machinery/constants"
    "github.com/siderolabs/talos/pkg/machinery/resources/network"
    runtimeres "github.com/siderolabs/talos/pkg/machinery/resources/runtime"
)

// Cloudstack is the concrete type that implements the runtime.Platform interface.
type Cloudstack struct{}

// ParseMetadata converts Cloudstack platform metadata into platform network config.
func (e *Cloudstack) ParseMetadata(metadata *MetadataConfig) (*runtime.PlatformNetworkConfig, error) {
    networkConfig := &runtime.PlatformNetworkConfig{}

    if metadata.Hostname != "" {
        hostnameSpec := network.HostnameSpecSpec{
            ConfigLayer: network.ConfigPlatform,
        }

        if err := hostnameSpec.ParseFQDN(metadata.Hostname); err != nil {
            return nil, err
        }

        networkConfig.Hostnames = append(networkConfig.Hostnames, hostnameSpec)
    }

    if metadata.PublicIPv4 != "" {
        if ip, err := netip.ParseAddr(metadata.PublicIPv4); err == nil {
            networkConfig.ExternalIPs = append(networkConfig.ExternalIPs, ip)
        }
    }

    networkConfig.Metadata = &runtimeres.PlatformMetadataSpec{
        Platform:     e.Name(),
        Hostname:     metadata.Hostname,
        Region:       metadata.Zone,
        Zone:         metadata.Zone,
        InstanceType: strings.ToLower(strings.SplitN(metadata.InstanceType, " ", 2)[0]),
        InstanceID:   metadata.InstanceID,
        ProviderID:   fmt.Sprintf("cloudstack://%s", metadata.InstanceID),
    }

    return networkConfig, nil
}

// Name implements the runtime.Platform interface.
func (e *Cloudstack) Name() string {
    return "cloudstack"
}

// Configuration implements the runtime.Platform interface.
func (e *Cloudstack) Configuration(ctx context.Context, r state.State) ([]byte, error) {
    if err := netutils.Wait(ctx, r); err != nil {
        return nil, err
    }

    log.Printf("fetching machine config from %q", CloudstackUserDataEndpoint)

    return download.Download(ctx, CloudstackUserDataEndpoint,
        download.WithErrorOnNotFound(errors.ErrNoConfigSource),
        download.WithErrorOnEmptyResponse(errors.ErrNoConfigSource))
}

// Mode implements the runtime.Platform interface.
func (e *Cloudstack) Mode() runtime.Mode {
    return runtime.ModeCloud
}

// KernelArgs implements the runtime.Platform interface.
func (e *Cloudstack) KernelArgs(string) procfs.Parameters {
    return []*procfs.Parameter{
        procfs.NewParameter("console").Append("tty1").Append("ttyS0"),
        procfs.NewParameter(constants.KernelParamNetIfnames).Append("0"),
    }
}

// NetworkConfiguration implements the runtime.Platform interface.
func (e *Cloudstack) NetworkConfiguration(ctx context.Context, _ state.State, ch chan<- *runtime.PlatformNetworkConfig) error {
    log.Printf("fetching cloudstack instance config from: %q", CloudstackMetadataEndpoint)

    metadata, err := e.getMetadata(ctx)
    if err != nil {
        return err
    }

    networkConfig, err := e.ParseMetadata(metadata)
    if err != nil {
        return err
    }

    select {
    case ch <- networkConfig:
    case <-ctx.Done():
        return ctx.Err()
    }

    return nil
}

@ -0,0 +1,39 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package cloudstack_test

import (
    _ "embed"
    "encoding/json"
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "gopkg.in/yaml.v3"

    "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/cloudstack"
)

//go:embed testdata/metadata.json
var rawMetadata []byte

//go:embed testdata/expected.yaml
var expectedNetworkConfig string

func TestEmpty(t *testing.T) {
    p := &cloudstack.Cloudstack{}

    var m cloudstack.MetadataConfig

    require.NoError(t, json.Unmarshal(rawMetadata, &m))

    networkConfig, err := p.ParseMetadata(&m)
    require.NoError(t, err)

    marshaled, err := yaml.Marshal(networkConfig)
    require.NoError(t, err)

    assert.Equal(t, expectedNetworkConfig, string(marshaled))
}
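To run this golden-file test locally, the package path can be taken from the import above; a quick sketch from the repository root:

```bash
# Run just the CloudStack platform test.
go test -v -run TestEmpty ./internal/app/machined/pkg/runtime/v1alpha1/platform/cloudstack/
```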

@ -0,0 +1,76 @@
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

package cloudstack

import (
    "context"
    stderrors "errors"
    "fmt"

    "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/errors"
    "github.com/siderolabs/talos/pkg/download"
)

const (
    // CloudstackMetadataEndpoint is the local Cloudstack endpoint.
    CloudstackMetadataEndpoint = "http://data-server./latest/meta-data"
    // CloudstackUserDataEndpoint is the local Cloudstack endpoint for the config.
    CloudstackUserDataEndpoint = "http://data-server./latest/user-data"
)

// MetadataConfig represents a metadata Cloudstack instance.
type MetadataConfig struct {
    Hostname     string `json:"local-hostname,omitempty"`
    InstanceID   string `json:"instance-id,omitempty"`
    InstanceType string `json:"service-offering,omitempty"`
    PublicIPv4   string `json:"public-ipv4,omitempty"`
    Zone         string `json:"availability-zone,omitempty"`
}

/*
local-ipv4
public-hostname
vm-id
public-keys
cloud-identifier
hypervisor-host-name
*/

func (e *Cloudstack) getMetadata(ctx context.Context) (metadata *MetadataConfig, err error) {
    getMetadataKey := func(key string) (string, error) {
        res, metaerr := download.Download(ctx, fmt.Sprintf("%s/%s", CloudstackMetadataEndpoint, key),
            download.WithErrorOnNotFound(errors.ErrNoConfigSource),
            download.WithErrorOnEmptyResponse(errors.ErrNoConfigSource))
        if metaerr != nil && !stderrors.Is(metaerr, errors.ErrNoConfigSource) {
            return "", fmt.Errorf("failed to fetch %q from IMDS: %w", key, metaerr)
        }

        return string(res), nil
    }

    metadata = &MetadataConfig{}

    if metadata.Hostname, err = getMetadataKey("local-hostname"); err != nil {
        return nil, err
    }

    if metadata.InstanceType, err = getMetadataKey("service-offering"); err != nil {
        return nil, err
    }

    if metadata.InstanceID, err = getMetadataKey("instance-id"); err != nil {
        return nil, err
    }

    if metadata.PublicIPv4, err = getMetadataKey("public-ipv4"); err != nil {
        return nil, err
    }

    if metadata.Zone, err = getMetadataKey("availability-zone"); err != nil {
        return nil, err
    }

    return metadata, nil
}
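For reference, the constants above describe CloudStack's metadata service, which serves each key as a plain-text HTTP resource. A hedged sketch of probing it from inside a CloudStack guest (how the `data-server.` name resolves depends on the CloudStack network setup):

```bash
# Spot-check the metadata keys consumed by the platform code above.
for key in local-hostname instance-id service-offering public-ipv4 availability-zone; do
  echo "${key}: $(curl -fsS "http://data-server./latest/meta-data/${key}")"
done
```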

@ -0,0 +1,18 @@
addresses: []
links: []
routes: []
hostnames:
    - hostname: talos
      domainname: fqdn
      layer: platform
resolvers: []
timeServers: []
operators: []
externalIPs:
    - 1.2.3.4
metadata:
    platform: cloudstack
    hostname: talos.fqdn
    instanceType: standard.tiny
    instanceId: 3fe6b28a-669e-4eb2-bffd-4180c572c410
    providerId: cloudstack://3fe6b28a-669e-4eb2-bffd-4180c572c410

@ -0,0 +1,6 @@
{
  "local-hostname": "talos.fqdn",
  "instance-id": "3fe6b28a-669e-4eb2-bffd-4180c572c410",
  "public-ipv4": "1.2.3.4",
  "service-offering": "standard.tiny"
}

@ -18,6 +18,7 @@ import (
"github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/akamai" "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/akamai"
"github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/aws" "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/aws"
"github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/azure" "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/azure"
"github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/cloudstack"
"github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/container" "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/container"
"github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/digitalocean" "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/digitalocean"
"github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/equinixmetal" "github.com/siderolabs/talos/internal/app/machined/pkg/runtime/v1alpha1/platform/equinixmetal"
@ -88,7 +89,7 @@ func NewPlatform(platform string) (p runtime.Platform, err error) {
return newPlatform(platform) return newPlatform(platform)
} }
//nolint:gocyclo //nolint:gocyclo,cyclop
func newPlatform(platform string) (p runtime.Platform, err error) { func newPlatform(platform string) (p runtime.Platform, err error) {
switch platform { switch platform {
case "akamai": case "akamai":
@ -97,6 +98,8 @@ func newPlatform(platform string) (p runtime.Platform, err error) {
return aws.NewAWS() return aws.NewAWS()
case "azure": case "azure":
p = &azure.Azure{} p = &azure.Azure{}
case "cloudstack":
p = &cloudstack.Cloudstack{}
case "container": case "container":
p = &container.Container{} p = &container.Container{}
case "digital-ocean": case "digital-ocean":

@ -121,6 +121,18 @@ var Default = map[string]Profile{
            },
        },
    },
+   "cloudstack": {
+       Platform:   "cloudstack",
+       SecureBoot: pointer.To(false),
+       Output: Output{
+           Kind:      OutKindImage,
+           OutFormat: OutFormatZSTD,
+           ImageOptions: &ImageOptions{
+               DiskSize:   DefaultRAWDiskSize,
+               DiskFormat: DiskFormatRaw,
+           },
+       },
+   },
    "digital-ocean": {
        Platform:   "digital-ocean",
        SecureBoot: pointer.To(false),

@ -6,14 +6,14 @@ description: "Table of supported Talos Linux versions and respective platforms."
| Talos Version | 1.8 | 1.7 |
| --- | --- | --- |
-| Release Date | 2024-08-15 (TBD) | 2024-04-19 (1.7.0) |
+| Release Date | 2024-09-15 (TBD) | 2024-04-19 (1.7.0) |
| End of Community Support | 1.9.0 release (2024-12-15, TBD) | 1.8.0 release (2024-08-15) |
| Enterprise Support | [offered by Sidero Labs Inc.](https://www.siderolabs.com/support/) | [offered by Sidero Labs Inc.](https://www.siderolabs.com/support/) |
| Kubernetes | 1.31, 1.30, 1.29, 1.28, 1.27, 1.26 | 1.30, 1.29, 1.28, 1.27, 1.26, 1.25 |
| NVIDIA Drivers | 550.x.x (PRODUCTION), 535.x.x (LTS) | 535.x.x (LTS) |
| Architecture | amd64, arm64 | amd64, arm64 |
| **Platforms** | | |
-| - cloud | Akamai, AWS, GCP, Azure, Digital Ocean, Exoscale, Hetzner, OpenNebula, OpenStack, Oracle Cloud, Scaleway, Vultr, Upcloud | Akamai, AWS, GCP, Azure, Digital Ocean, Exoscale, Hetzner, OpenNebula, OpenStack, Oracle Cloud, Scaleway, Vultr, Upcloud |
+| - cloud | Akamai, AWS, GCP, Azure, CloudStack, Digital Ocean, Exoscale, Hetzner, OpenNebula, OpenStack, Oracle Cloud, Scaleway, Vultr, Upcloud | Akamai, AWS, GCP, Azure, Digital Ocean, Exoscale, Hetzner, OpenNebula, OpenStack, Oracle Cloud, Scaleway, Vultr, Upcloud |
| - bare metal | x86: BIOS, UEFI, SecureBoot; arm64: UEFI, SecureBoot; boot: ISO, PXE, disk image | x86: BIOS, UEFI; arm64: UEFI; boot: ISO, PXE, disk image |
| - virtualized | VMware, Hyper-V, KVM, Proxmox, Xen | VMware, Hyper-V, KVM, Proxmox, Xen |
| - SBCs | Banana Pi M64, Jetson Nano, Libre Computer Board ALL-H3-CC, Nano Pi R4S, Pine64, Pine64 Rock64, Radxa ROCK Pi 4c, Radxa Rock4c+, Raspberry Pi 4B, Raspberry Pi Compute Module 4 | Banana Pi M64, Jetson Nano, Libre Computer Board ALL-H3-CC, Nano Pi R4S, Orange Pi R1 Plus LTS, Pine64, Pine64 Rock64, Radxa ROCK Pi 4c, Raspberry Pi 4B, Raspberry Pi Compute Module 4 |
@ -45,6 +45,7 @@ description: "Table of supported Talos Linux versions and respective platforms."
### Tier 3

* Akamai
+* CloudStack
* Exoscale
* Hetzner
* nocloud

@ -0,0 +1,228 @@
---
title: "CloudStack"
description: "Creating a cluster via the CLI (cmk) on Apache CloudStack."
---
## Creating a Talos Linux Cluster on Apache CloudStack via the CMK CLI
In this guide we will create a single-node Kubernetes cluster in Apache CloudStack.
We assume that Apache CloudStack is already running in a basic configuration, and that you have some familiarity with Apache CloudStack.
We will be using the [CloudStack Cloudmonkey](https://github.com/apache/cloudstack-cloudmonkey) CLI tool.
Please see the [official Apache CloudStack documentation](https://docs.cloudstack.apache.org/en/latest/) for information related to Apache CloudStack.
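If `cmk` has not been configured yet, point it at your management server and API credentials before proceeding; a minimal sketch, with placeholder values:

```bash
# Configure Cloudmonkey for your environment (all values are placeholders).
cmk set url http://cloudstack.example.com:8080/client/api
cmk set apikey <your-api-key>
cmk set secretkey <your-secret-key>

# Cache the API definitions used by the commands below.
cmk sync
```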
### Obtain the Talos Image
Download the Talos CloudStack image `cloudstack-amd64.raw.gz` from the [Image Factory](https://factory.talos.dev).
> Note: the minimum version of Talos required to support Apache CloudStack is v1.8.0.
Using an upload method of your choice, upload the image to Apache CloudStack.
You might be able to use "Register Template from URL" to have CloudStack download the image directly from the Image Factory.
> Note: CloudStack does not seem to like compressed images, so you might have to download the image to a local webserver, uncompress it, and let CloudStack fetch the image from there instead (see the sketch below).
> Alternatively, you can try removing `.gz` from the URL to fetch an uncompressed image from the Image Factory.
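A hypothetical end-to-end example of the local-webserver route, where the webserver address, hypervisor, zone ID, and OS type ID all need to be adapted to your environment:

```bash
# Decompress the Image Factory artifact so CloudStack can fetch a raw image over HTTP.
gunzip cloudstack-amd64.raw.gz

# Serve the current directory from any webserver reachable by CloudStack.
python3 -m http.server 8000 &

# Register the image as a template (IDs and hypervisor are environment-specific).
cmk register template name="Talos 1.8.0" displaytext="Talos 1.8.0" \
  url=http://<webserver-ip>:8000/cloudstack-amd64.raw \
  format=RAW hypervisor=KVM zoneid=<zone-uuid> ostypeid=<os-type-uuid>
```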
### Get Required Variables
Next we will get a number of required variables and export them for later use:
#### Get Image Template ID
```bash
$ cmk list templates templatefilter=self | jq -r '.template[] | [.id, .name] | @tsv' | sort -k2
01813d29-1253-4080-8d29-d405d94148af Talos 1.8.0
...
$ export IMAGE_ID=01813d29-1253-4080-8d29-d405d94148af
```
#### Get Zone ID
Get a list of zones and select the relevant zone.
```bash
$ cmk list zones | jq -r '.zone[] | [.id, .name] | @tsv' | sort -k2
a8c71a6f-2e09-41ed-8754-2d4dd8783920 fsn1
9d38497b-d810-42ab-a772-e596994d21d2 fsn2
...
$ export ZONE_ID=a8c71a6f-2e09-41ed-8754-2d4dd8783920
```
#### Get Service Offering ID
Get a list of service offerings (instance types) and select the desired offering.
```bash
$ cmk list serviceofferings | jq -r '.serviceoffering[] | [.id, .memory, .cpunumber, .name] | @tsv' | sort -k4
82ac8c87-22ee-4ec3-8003-c80b09efe02c 2048 2 K8S-CP-S
c7f5253e-e1f1-4e33-a45e-eb2ebbc65fd4 4096 2 K8S-WRK-S
...
$ export SERVICEOFFERING_ID=82ac8c87-22ee-4ec3-8003-c80b09efe02c
```
#### Get Network ID
Get a list of networks and select the relevant network for your cluster.
```bash
$ cmk list networks zoneid=${ZONE_ID} | jq -r '.network[] | [.id, .type, .name] | @tsv' | sort -k3
f706984f-9dd1-4cb8-9493-3fba1f0de7e3 Isolate demo
143ed8f1-3cc5-4ba2-8717-457ad993cf25 Isolated talos
...
$ export NETWORK_ID=143ed8f1-3cc5-4ba2-8717-457ad993cf25
```
#### Get next free Public IP address and ID
To create a loadbalancer for the K8S API Endpoint, find the next available public IP address in the zone.
(In this test environment, the 10.0.0.0/24 RFC-1918 IP range has been configured as "Public IP addresses")
```bash
$ cmk list publicipaddresses zoneid=${ZONE_ID} state=free forvirtualnetwork=true | jq -r '.publicipaddress[] | [.id, .ipaddress] | @tsv' | sort -k2
1901d946-3797-48aa-a113-8fb730b0770a 10.0.0.102
fa207d0e-c8f8-4f09-80f0-d45a6aac77eb 10.0.0.103
aa397291-f5dc-4903-b299-277161b406cb 10.0.0.104
...
$ export PUBLIC_IPADDRESS=10.0.0.102
$ export PUBLIC_IPADDRESS_ID=1901d946-3797-48aa-a113-8fb730b0770a
```
#### Acquire and Associate Public IP Address
Acquire and associate the public IP address with the network we selected earlier.
```bash
$ cmk associateIpAddress ipaddress=${PUBLIC_IPADDRESS} networkid=${NETWORK_ID}
{
"ipaddress": {
...,
"ipaddress": "10.0.0.102",
...
}
}
```
#### Create LB and FW rule using the Public IP Address
Create a Loadbalancer for the K8S API Endpoint.
> Note: The "create loadbalancerrule" also takes care of creating a corresponding firewallrule.
```bash
$ cmk create loadbalancerrule algorithm=roundrobin name="k8s-api" privateport=6443 publicport=6443 openfirewall=true publicipid=${PUBLIC_IPADDRESS_ID} cidrlist=0.0.0.0/0
{
"loadbalancer": {
...
"name": "k8s-api",
"networkid": "143ed8f1-3cc5-4ba2-8717-457ad993cf25",
"privateport": "6443",
"publicip": "10.0.0.102",
"publicipid": "1901d946-3797-48aa-a113-8fb730b0770a",
"publicport": "6443",
...
}
}
```
### Create the Talos Configuration Files
Finally it's time to generate the Talos configuration files, using the Public IP address assigned to the loadbalancer.
```bash
$ talosctl gen config talos-cloudstack https://${PUBLIC_IPADDRESS}:6443 --with-docs=false --with-examples=false
created controlplane.yaml
created worker.yaml
created talosconfig
```
Make any adjustments to the `controlplane.yaml` and/or `worker.yaml` as you like.
> Note: Remember to validate!
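The note above refers to `talosctl validate`; for a cloud platform the generated configs can be checked like this:

```bash
# Validate both generated configs for the cloud runtime mode.
talosctl validate --config controlplane.yaml --mode cloud
talosctl validate --config worker.yaml --mode cloud
```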
#### Create Talos VM
Next we will create the actual VM and supply the `controlplane.yaml` as base64-encoded `userdata`.
```bash
$ cmk deploy virtualmachine zoneid=${ZONE_ID} templateid=${IMAGE_ID} serviceofferingid=${SERVICEOFFERING_ID} networkIds=${NETWORK_ID} name=talosdemo userdata=$(base64 controlplane.yaml | tr -d '\n')
{
"virtualmachine": {
"account": "admin",
"affinitygroup": [],
"cpunumber": 2,
"cpuspeed": 2000,
"cpuused": "0.3%",
...
}
}
```
#### Get Talos VM ID and Internal IP address
Get the ID of our newly created VM.
(Also available in the full output of the above command.)
```bash
$ cmk list virtualmachines | jq -r '.virtualmachine[] | [.id, .ipaddress, .name]|@tsv' | sort -k3
9c119627-cb38-4b64-876b-ca2b79820b5a 10.1.1.154 srv03
545099fc-ec2d-4f32-915d-b0c821cfb634 10.1.1.97 srv04
d37aeca4-7d1f-45cd-9a4d-97fdbf535aa1 10.1.1.243 talosdemo
$ export VM_ID=d37aeca4-7d1f-45cd-9a4d-97fdbf535aa1
$ export VM_IP=10.1.1.243
```
#### Get Load Balancer ID
Obtain the ID of the `loadbalancerrule` we created earlier.
```bash
$ cmk list loadbalancerrules | jq -r '.loadbalancerrule[]| [.id, .publicip, .name] | @tsv' | sort -k2
ede6b711-b6bc-4ade-9e48-4b3f5aa59934 10.0.0.102 k8s-api
1bad3c46-96fa-4f50-a4fc-9a46a54bc350 10.0.0.197 ac0b5d98cf6a24d55a4fb2f9e240c473-tcp-443
$ export LB_RULE_ID=ede6b711-b6bc-4ade-9e48-4b3f5aa59934
```
#### Assign Talos VM to Load Balancer
With the ID of the VM and the load balancer, we can assign the VM to the `loadbalancerrule`, making the K8S API endpoint available via the load balancer.
```bash
cmk assigntoloadbalancerrule id=${LB_RULE_ID} virtualmachineids=${VM_ID}
```
### Bootstrap Etcd
Once the Talos VM has booted, it is time to bootstrap etcd.
Configure `talosctl` with the IP address of the control plane node.
Set the `endpoints` and `nodes`:
```bash
talosctl --talosconfig talosconfig config endpoint ${VM_IP}
talosctl --talosconfig talosconfig config node ${VM_IP}
```
Next, bootstrap `etcd`:
```bash
talosctl --talosconfig talosconfig bootstrap
```
### Retrieve the `kubeconfig`
At this point we can retrieve the admin `kubeconfig` by running:
```bash
talosctl --talosconfig talosconfig kubeconfig .
```
We can also watch the cluster bootstrap via:
```bash
talosctl --talosconfig talosconfig dashboard
```
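As a final check, the `kubeconfig` retrieved above is written to the current directory, so the node status can be inspected with `kubectl`; for example:

```bash
# Verify that the control plane node has registered with the cluster.
kubectl --kubeconfig ./kubeconfig get nodes -o wide
```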