chore: azure e2e

Add code to support azure e2e

Signed-off-by: Noel Georgi <git@frezbo.dev>
This commit is contained in:
Noel Georgi 2024-03-22 21:28:32 +05:30
parent 55dd41c0df
commit ee51f04af3
No known key found for this signature in database
GPG Key ID: 21A9F444075C9E36
10 changed files with 212 additions and 129 deletions

View File

@ -739,16 +739,17 @@ local integration_pipelines = [
Pipeline('cron-integration-qemu-csi', default_pipeline_steps + [integration_qemu_csi], [default_cron_pipeline]) + cron_trigger(['nightly']), Pipeline('cron-integration-qemu-csi', default_pipeline_steps + [integration_qemu_csi], [default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-integration-images', default_pipeline_steps + [integration_images], [default_cron_pipeline]) + cron_trigger(['nightly']), Pipeline('cron-integration-images', default_pipeline_steps + [integration_images], [default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-integration-reproducibility-test', default_pipeline_steps + [integration_reproducibility_test], [default_cron_pipeline]) + cron_trigger(['nightly']), Pipeline('cron-integration-reproducibility-test', default_pipeline_steps + [integration_reproducibility_test], [default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-image-factory', default_pipeline_steps + [ Pipeline('cron-image-factory',
integration_factory_16_iso, default_pipeline_steps + [
integration_factory_16_image, integration_factory_16_iso,
integration_factory_16_pxe, integration_factory_16_image,
integration_factory_16_secureboot, integration_factory_16_pxe,
integration_factory_15_iso, integration_factory_16_secureboot,
integration_factory_13_iso, integration_factory_15_iso,
integration_factory_13_image, integration_factory_13_iso,
], integration_factory_13_image,
[default_cron_pipeline]) + cron_trigger(['nightly']), ],
[default_cron_pipeline]) + cron_trigger(['nightly']),
]; ];
@ -835,11 +836,75 @@ local E2EAWS(target) =
targets; targets;
local E2EAzure() =
local depends_on = [load_artifacts];
local e2e_azure_prepare = Step(
'e2e-azure-prepare',
depends_on=depends_on,
environment=creds_env_vars {
IMAGE_REGISTRY: local_registry,
},
extra_commands=[
'az login --service-principal -u "$${AZURE_CLIENT_ID}" -p "$${AZURE_CLIENT_SECRET}" --tenant "$${AZURE_TENANT_ID}"',
'az storage blob upload-batch --overwrite -s _out --pattern "e2e-azure-generated/*" -d "${CI_COMMIT_SHA}${DRONE_TAG//./-}"',
]
);
local tf_apply = TriggerDownstream(
'tf-apply',
'e2e-talos-tf-apply',
['siderolabs/contrib@main'],
params=[
'BUCKET_PATH=${CI_COMMIT_SHA}${DRONE_TAG//./-}',
'TYPE=azure',
],
depends_on=[e2e_azure_prepare],
);
local e2e_azure_tf_apply_post = Step(
'e2e-azure-download-artifacts',
with_make=false,
environment=creds_env_vars,
extra_commands=[
'az login --service-principal -u "$${AZURE_CLIENT_ID}" -p "$${AZURE_CLIENT_SECRET}" --tenant "$${AZURE_TENANT_ID}"',
'az storage blob download -f _out/e2e-azure-talosconfig -n e2e-azure-talosconfig -c ${CI_COMMIT_SHA}${DRONE_TAG//./-}',
'az storage blob download -f _out/e2e-azure-kubeconfig -n e2e-azure-kubeconfig -c ${CI_COMMIT_SHA}${DRONE_TAG//./-}',
],
depends_on=[tf_apply],
);
local e2e_azure = Step(
'e2e-azure',
depends_on=[e2e_azure_tf_apply_post],
environment=creds_env_vars {}
);
local tf_destroy = TriggerDownstream(
'tf-destroy',
'e2e-talos-tf-destroy',
['siderolabs/contrib@main'],
params=[
'BUCKET_PATH=${CI_COMMIT_SHA}${DRONE_TAG//./-}',
'TYPE=azure',
],
depends_on=[e2e_azure],
when={
status: [
'failure',
'success',
],
},
);
local targets = [e2e_azure_prepare, tf_apply, e2e_azure_tf_apply_post, e2e_azure, tf_destroy];
targets;
local e2e_aws = [step for step in E2EAWS('default')]; local e2e_aws = [step for step in E2EAWS('default')];
local e2e_aws_nvidia_oss = [step for step in E2EAWS('nvidia-oss')]; local e2e_aws_nvidia_oss = [step for step in E2EAWS('nvidia-oss')];
local e2e_azure = [step for step in E2EAzure()];
local e2e_azure = Step('e2e-azure', depends_on=[e2e_capi], environment=creds_env_vars);
local e2e_gcp = Step('e2e-gcp', depends_on=[e2e_capi], environment=creds_env_vars); local e2e_gcp = Step('e2e-gcp', depends_on=[e2e_capi], environment=creds_env_vars);
local e2e_trigger(names) = { local e2e_trigger(names) = {
@ -854,10 +919,12 @@ local e2e_pipelines = [
// regular pipelines, triggered on promote events // regular pipelines, triggered on promote events
Pipeline('e2e-aws', default_pipeline_steps + e2e_aws) + e2e_trigger(['e2e-aws']), Pipeline('e2e-aws', default_pipeline_steps + e2e_aws) + e2e_trigger(['e2e-aws']),
Pipeline('e2e-aws-nvidia-oss', default_pipeline_steps + e2e_aws_nvidia_oss) + e2e_trigger(['e2e-aws-nvidia-oss']), Pipeline('e2e-aws-nvidia-oss', default_pipeline_steps + e2e_aws_nvidia_oss) + e2e_trigger(['e2e-aws-nvidia-oss']),
Pipeline('e2e-azure', default_pipeline_steps + e2e_azure) + e2e_trigger(['e2e-azure']),
Pipeline('e2e-gcp', default_pipeline_steps + [capi_docker, e2e_capi, e2e_gcp]) + e2e_trigger(['e2e-gcp']), Pipeline('e2e-gcp', default_pipeline_steps + [capi_docker, e2e_capi, e2e_gcp]) + e2e_trigger(['e2e-gcp']),
// cron pipelines, triggered on schedule events // cron pipelines, triggered on schedule events
Pipeline('cron-e2e-aws', default_pipeline_steps + e2e_aws, [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']), Pipeline('cron-e2e-aws', default_pipeline_steps + e2e_aws, [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']),
Pipeline('cron-e2e-azure', default_pipeline_steps + e2e_azure, [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']),
Pipeline('cron-e2e-gcp', default_pipeline_steps + [capi_docker, e2e_capi, e2e_gcp], [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']), Pipeline('cron-e2e-gcp', default_pipeline_steps + [capi_docker, e2e_capi, e2e_gcp], [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']),
]; ];

View File

@ -370,7 +370,7 @@ image-%: ## Builds the specified image. Valid options are aws, azure, digital-oc
docker run --rm -t -v /dev:/dev -v $(PWD)/$(ARTIFACTS):/secureboot:ro -v $(PWD)/$(ARTIFACTS):/out --network=host --privileged $(REGISTRY_AND_USERNAME)/imager:$(IMAGE_TAG) $* --arch $$arch $(IMAGER_ARGS) ; \ docker run --rm -t -v /dev:/dev -v $(PWD)/$(ARTIFACTS):/secureboot:ro -v $(PWD)/$(ARTIFACTS):/out --network=host --privileged $(REGISTRY_AND_USERNAME)/imager:$(IMAGE_TAG) $* --arch $$arch $(IMAGER_ARGS) ; \
done done
images-essential: image-aws image-gcp image-metal secureboot-installer ## Builds only essential images used in the CI (AWS, GCP, and Metal). images-essential: image-aws image-azure image-gcp image-metal secureboot-installer ## Builds only essential images used in the CI (AWS, Azure, GCP, and Metal).
images: image-aws image-azure image-digital-ocean image-exoscale image-gcp image-hcloud image-iso image-metal image-nocloud image-opennebula image-openstack image-oracle image-scaleway image-upcloud image-vmware image-vultr ## Builds all known images (AWS, Azure, DigitalOcean, Exoscale, GCP, HCloud, Metal, NoCloud, OpenNebula, Openstack, Oracle, Scaleway, UpCloud, Vultr and VMware). images: image-aws image-azure image-digital-ocean image-exoscale image-gcp image-hcloud image-iso image-metal image-nocloud image-opennebula image-openstack image-oracle image-scaleway image-upcloud image-vmware image-vultr ## Builds all known images (AWS, Azure, DigitalOcean, Exoscale, GCP, HCloud, Metal, NoCloud, OpenNebula, Openstack, Oracle, Scaleway, UpCloud, Vultr and VMware).

View File

@ -4,4 +4,4 @@ set -e
cd hack/cloud-image-uploader cd hack/cloud-image-uploader
go run . --artifacts-path="../../${ARTIFACTS}" --tag="${TAG}" --abbrev-tag="${ABBREV_TAG}" "$@" go run . --artifacts-path="../../${ARTIFACTS}" --tag="${TAG}" "$@"

View File

@ -27,6 +27,7 @@ import (
"github.com/Azure/go-autorest/autorest/azure/auth" "github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/blang/semver/v4" "github.com/blang/semver/v4"
"github.com/siderolabs/gen/channel" "github.com/siderolabs/gen/channel"
"github.com/siderolabs/gen/xslices"
"github.com/ulikunitz/xz" "github.com/ulikunitz/xz"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
) )
@ -47,29 +48,31 @@ var azureArchitectures = map[string]string{
type AzureUploader struct { type AzureUploader struct {
Options Options Options Options
preRelease bool
helper azureHelper helper azureHelper
} }
// extractVersion extracts the version number in the format of int.int.int for Azure and assigns to the Options.AzureTag value. // extractVersion extracts the version number in the format of int.int.int for Azure and assigns to the Options.AzureTag value.
func (azu *AzureUploader) setVersion() error { func (azu *AzureUploader) setVersion() error {
v, err := semver.ParseTolerant(azu.Options.AzureAbbrevTag) v, err := semver.ParseTolerant(azu.Options.Tag)
if err != nil { if err != nil {
return err return err
} }
versionCore := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) versionCore := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
if fmt.Sprintf("v%s", versionCore) != azu.Options.AzureAbbrevTag { azu.helper.version = versionCore
azu.Options.AzureGalleryName = "SideroLabs"
if fmt.Sprintf("v%s", versionCore) != azu.Options.Tag {
azu.preRelease = true
azu.Options.AzureGalleryName = "SideroGalleryTest" azu.Options.AzureGalleryName = "SideroGalleryTest"
azu.Options.AzureCoreTag = versionCore
fmt.Println(azu.Options.AzureGalleryName)
} else {
azu.Options.AzureGalleryName = "SideroLabs"
azu.Options.AzureCoreTag = versionCore
fmt.Println(azu.Options.AzureGalleryName)
} }
return err log.Println("azure: using Azure Gallery:", azu.Options.AzureGalleryName)
return nil
} }
// AzureGalleryUpload uploads the image to Azure. // AzureGalleryUpload uploads the image to Azure.
@ -91,11 +94,13 @@ func (azu *AzureUploader) AzureGalleryUpload(ctx context.Context) error {
return fmt.Errorf("error setting default Azure credentials: %w", err) return fmt.Errorf("error setting default Azure credentials: %w", err)
} }
log.Printf("azure: getting locations") if len(azu.Options.AzureRegions) == 0 {
regions, err := azu.helper.getAzureLocations(ctx)
if err != nil {
return fmt.Errorf("azure: error getting locations: %w", err)
}
err = azu.helper.getAzureLocations(ctx) azu.Options.AzureRegions = regions
if err != nil {
return fmt.Errorf("azure: error setting default Azure credentials: %w", err)
} }
// Upload blob // Upload blob
@ -245,14 +250,16 @@ uploadLoop:
} }
func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch string) error { func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch string) error {
targetRegions := make([]*armcompute.TargetRegion, 0, len(azu.helper.locations)) var targetRegions []*armcompute.TargetRegion
for _, region := range azu.helper.locations { if !azu.preRelease {
targetRegions = append(targetRegions, &armcompute.TargetRegion{ targetRegions = xslices.Map(azu.Options.AzureRegions, func(region string) *armcompute.TargetRegion {
Name: to.Ptr(region.Name), return &armcompute.TargetRegion{
ExcludeFromLatest: to.Ptr(false), Name: to.Ptr(region),
RegionalReplicaCount: to.Ptr[int32](1), ExcludeFromLatest: to.Ptr(false),
StorageAccountType: to.Ptr(armcompute.StorageAccountTypeStandardLRS), RegionalReplicaCount: to.Ptr[int32](1),
StorageAccountType: to.Ptr(armcompute.StorageAccountTypeStandardLRS),
}
}) })
} }
@ -265,8 +272,8 @@ func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch stri
} }
for _, v := range page.Value { for _, v := range page.Value {
if *v.Name == azu.Options.AzureCoreTag { if *v.Name == azu.helper.version {
log.Printf("azure: image version exists for %s\n azure: removing old image version\n", *v.Name) log.Printf("azure: image version exists for %s\n", *v.Name)
err = azu.deleteImageVersion(ctx, arch) err = azu.deleteImageVersion(ctx, arch)
if err != nil { if err != nil {
@ -283,7 +290,7 @@ func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch stri
resourceGroupName, resourceGroupName,
azu.Options.AzureGalleryName, azu.Options.AzureGalleryName,
fmt.Sprintf("talos-%s", azureArchitectures[arch]), fmt.Sprintf("talos-%s", azureArchitectures[arch]),
azu.Options.AzureCoreTag, azu.helper.version,
armcompute.GalleryImageVersion{ armcompute.GalleryImageVersion{
Location: to.Ptr(defaultRegion), Location: to.Ptr(defaultRegion),
Properties: &armcompute.GalleryImageVersionProperties{ Properties: &armcompute.GalleryImageVersionProperties{
@ -309,21 +316,34 @@ func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch stri
return fmt.Errorf("azure: failed to create image version: %w", err) return fmt.Errorf("azure: failed to create image version: %w", err)
} }
_, err = poller.PollUntilDone(ctx, nil) res, err := poller.PollUntilDone(ctx, nil)
if err != nil { if err != nil {
return fmt.Errorf("azure: failed to pull the result for image version creation: %w", err) return fmt.Errorf("azure: failed to pull the result for image version creation: %w", err)
} }
return err for _, region := range azu.Options.AzureRegions {
pushResult(CloudImage{
Cloud: "azure",
Tag: azu.Options.Tag,
Region: region,
Arch: arch,
Type: "vhd",
ID: *res.ID,
})
}
return nil
} }
func (azu *AzureUploader) deleteImageVersion(ctx context.Context, arch string) error { func (azu *AzureUploader) deleteImageVersion(ctx context.Context, arch string) error {
log.Println("azure: removing old image version")
poller, err := azu.helper.clientFactory.NewGalleryImageVersionsClient().BeginDelete( poller, err := azu.helper.clientFactory.NewGalleryImageVersionsClient().BeginDelete(
ctx, ctx,
resourceGroupName, resourceGroupName,
azu.Options.AzureGalleryName, azu.Options.AzureGalleryName,
fmt.Sprintf("talos-%s", azureArchitectures[arch]), fmt.Sprintf("talos-%s", azureArchitectures[arch]),
azu.Options.AzureCoreTag, azu.helper.version,
nil) nil)
if err != nil { if err != nil {
return fmt.Errorf("azure: failed to delete image: %w", err) return fmt.Errorf("azure: failed to delete image: %w", err)
@ -334,16 +354,16 @@ func (azu *AzureUploader) deleteImageVersion(ctx context.Context, arch string) e
return fmt.Errorf("azure: failed to pull the result for image deletion: %w", err) return fmt.Errorf("azure: failed to pull the result for image deletion: %w", err)
} }
return err return nil
} }
type azureHelper struct { type azureHelper struct {
version string
subscriptionID string subscriptionID string
clientFactory *armcompute.ClientFactory clientFactory *armcompute.ClientFactory
cred *azidentity.DefaultAzureCredential cred *azidentity.DefaultAzureCredential
authorizer autorest.Authorizer authorizer autorest.Authorizer
providersClient resources.ProvidersClient providersClient resources.ProvidersClient
locations map[string]Location
} }
func (helper *azureHelper) setDefaultAzureCreds() error { func (helper *azureHelper) setDefaultAzureCreds() error {
@ -385,34 +405,21 @@ func (helper *azureHelper) setDefaultAzureCreds() error {
return nil return nil
} }
//nolint:gocyclo func (helper *azureHelper) getAzureLocations(ctx context.Context) ([]string, error) {
func (helper *azureHelper) getAzureLocations(ctx context.Context) error { var regions []string
providers, err := helper.listProviders(ctx)
result, err := helper.providersClient.Get(ctx, "Microsoft.Compute", "")
if err != nil { if err != nil {
return err return nil, fmt.Errorf("azure: error getting Microsoft.Compute: %w", err)
} }
var computeProvider resources.Provider if result.ResourceTypes != nil {
for _, rt := range *result.ResourceTypes {
for _, provider := range providers {
if provider.Namespace != nil && *provider.Namespace == "Microsoft.Compute" {
computeProvider = provider
break
}
}
helper.locations = make(map[string]Location)
if computeProvider.ResourceTypes != nil {
for _, rt := range *computeProvider.ResourceTypes {
if rt.ResourceType != nil && *rt.ResourceType == "virtualMachines" { if rt.ResourceType != nil && *rt.ResourceType == "virtualMachines" {
if rt.Locations != nil { if rt.Locations != nil {
for _, region := range *rt.Locations { regions = xslices.Map(*rt.Locations, func(s string) string {
abbr := strings.ReplaceAll(region, " ", "") return strings.ToLower(strings.ReplaceAll(s, " ", ""))
abbr = strings.ToLower(abbr) })
helper.locations[abbr] = Location{Abbreviation: abbr, Name: region}
}
} }
break break
@ -420,17 +427,5 @@ func (helper *azureHelper) getAzureLocations(ctx context.Context) error {
} }
} }
return err return regions, nil
}
func (helper *azureHelper) listProviders(ctx context.Context) (result []resources.Provider, err error) {
for list, err := helper.providersClient.List(ctx, ""); list.NotDone(); err = list.NextWithContext(ctx) {
if err != nil {
return nil, fmt.Errorf("azure: error getting providers list: %v", err)
}
result = append(result, list.Values()...)
}
return
} }

View File

@ -54,19 +54,14 @@ func main() {
func run() error { func run() error {
var err error var err error
DefaultOptions.AWSRegions, err = GetAWSDefaultRegions()
if err != nil {
log.Printf("failed to get a list of enabled AWS regions: %s, ignored", err)
}
pflag.StringSliceVar(&DefaultOptions.TargetClouds, "target-clouds", DefaultOptions.TargetClouds, "cloud targets to upload to") pflag.StringSliceVar(&DefaultOptions.TargetClouds, "target-clouds", DefaultOptions.TargetClouds, "cloud targets to upload to")
pflag.StringSliceVar(&DefaultOptions.Architectures, "architectures", DefaultOptions.Architectures, "list of architectures to process") pflag.StringSliceVar(&DefaultOptions.Architectures, "architectures", DefaultOptions.Architectures, "list of architectures to process")
pflag.StringVar(&DefaultOptions.ArtifactsPath, "artifacts-path", DefaultOptions.ArtifactsPath, "artifacts path") pflag.StringVar(&DefaultOptions.ArtifactsPath, "artifacts-path", DefaultOptions.ArtifactsPath, "artifacts path")
pflag.StringVar(&DefaultOptions.Tag, "tag", DefaultOptions.Tag, "tag (version) of the uploaded image") pflag.StringVar(&DefaultOptions.Tag, "tag", DefaultOptions.Tag, "tag (version) of the uploaded image")
pflag.StringVar(&DefaultOptions.AzureAbbrevTag, "abbrev-tag", DefaultOptions.AzureAbbrevTag, "abbreviated tag (version) of the uploaded image")
pflag.StringVar(&DefaultOptions.NamePrefix, "name-prefix", DefaultOptions.NamePrefix, "prefix for the name of the uploaded image") pflag.StringVar(&DefaultOptions.NamePrefix, "name-prefix", DefaultOptions.NamePrefix, "prefix for the name of the uploaded image")
pflag.StringSliceVar(&DefaultOptions.AWSRegions, "aws-regions", DefaultOptions.AWSRegions, "list of AWS regions to upload to") pflag.StringSliceVar(&DefaultOptions.AWSRegions, "aws-regions", DefaultOptions.AWSRegions, "list of AWS regions to upload to")
pflag.StringSliceVar(&DefaultOptions.AzureRegions, "azure-regions", DefaultOptions.AzureRegions, "list of Azure regions to upload to")
pflag.Parse() pflag.Parse()
@ -86,6 +81,13 @@ func run() error {
switch target { switch target {
case "aws": case "aws":
g.Go(func() error { g.Go(func() error {
if len(DefaultOptions.AWSRegions) == 0 {
DefaultOptions.AWSRegions, err = GetAWSDefaultRegions()
if err != nil {
log.Printf("failed to get a list of enabled AWS regions: %s, ignored", err)
}
}
aws := AWSUploader{ aws := AWSUploader{
Options: DefaultOptions, Options: DefaultOptions,
} }

View File

@ -21,19 +21,11 @@ type Options struct {
AWSRegions []string AWSRegions []string
// Azure options. // Azure options.
AzureRegions []Location AzureRegions []string
AzureCoreTag string
AzureAbbrevTag string
AzureGalleryName string AzureGalleryName string
AzurePreRelease string AzurePreRelease string
} }
// Location is the struct for the Azure Regions Options.
type Location struct {
Abbreviation string
Name string
}
// DefaultOptions used throughout the cli. // DefaultOptions used throughout the cli.
var DefaultOptions = Options{ var DefaultOptions = Options{
ArtifactsPath: "_out/", ArtifactsPath: "_out/",

View File

@ -13,7 +13,7 @@ function cloud_image_upload() {
} }
function get_ami_id() { function get_ami_id() {
jq -r ".[] | select(.region == \"${REGION}\") | select (.arch == \"amd64\") | .id" "${ARTIFACTS}/cloud-images.json" jq -r ".[] | select(.cloud == \"aws\") | select(.region == \"${REGION}\") | select (.arch == \"amd64\") | .id" "${ARTIFACTS}/cloud-images.json"
} }
function cloud_image_upload_with_extensions() { function cloud_image_upload_with_extensions() {

35
hack/test/e2e-azure-prepare.sh Executable file
View File

@ -0,0 +1,35 @@
#!/usr/bin/env bash
set -eou pipefail
source ./hack/test/e2e.sh
REGION="eastus"
function cloud_image_upload() {
CLOUD_IMAGES_EXTRA_ARGS=("--target-clouds=azure" "--architectures=amd64" "--azure-regions=${REGION}")
make cloud-images CLOUD_IMAGES_EXTRA_ARGS="${CLOUD_IMAGES_EXTRA_ARGS[*]}"
}
function get_os_id() {
jq -r ".[] | select(.cloud == \"azure\") | select(.region == \"${REGION}\") | select (.arch == \"amd64\") | .id" "${ARTIFACTS}/cloud-images.json"
}
cloud_image_upload
VM_OS_ID=$(get_os_id)
mkdir -p "${ARTIFACTS}/e2e-azure-generated"
NAME_PREFIX="talos-e2e-${SHA}-azure"
jq --null-input \
--arg VM_OS_ID "${VM_OS_ID}" \
--arg CLUSTER_NAME "${NAME_PREFIX}" \
--arg TALOS_VERSION_CONTRACT "${TALOS_VERSION}" \
--arg KUBERNETES_VERSION "${KUBERNETES_VERSION}" \
'{vm_os_id: $VM_OS_ID, cluster_name: $CLUSTER_NAME, talos_version_contract: $TALOS_VERSION_CONTRACT, kubernetes_version: $KUBERNETES_VERSION}' \
| jq -f hack/test/tfvars/azure.jq > "${ARTIFACTS}/e2e-azure-generated/vars.json"
cp hack/test/tfvars/*.yaml "${ARTIFACTS}/e2e-azure-generated"

View File

@ -4,42 +4,12 @@ set -eou pipefail
source ./hack/test/e2e.sh source ./hack/test/e2e.sh
function setup { cp "${ARTIFACTS}/e2e-azure-talosconfig" "${TALOSCONFIG}"
AZURE_STORAGE_ACCOUNT=talostesting cp "${ARTIFACTS}/e2e-azure-kubeconfig" "${KUBECONFIG}"
AZURE_STORAGE_CONTAINER=talostesting
AZURE_GROUP=talos
# Setup svc acct vars # set the talosconfig to use the first controlplane ip
set +x CONTROLPLANE0_NODE=$(${TALOSCTL} config info -o json | jq -r '.endpoints[0]')
echo ${AZURE_SVC_ACCT} | base64 -d > ${TMP}/svc-acct.json ${TALOSCTL} config node "${CONTROLPLANE0_NODE}"
AZURE_CLIENT_ID="$( cat ${TMP}/svc-acct.json | jq -r '.clientId' )"
AZURE_CLIENT_SECRET="$( cat ${TMP}/svc-acct.json | jq -r '.clientSecret' )"
AZURE_TENANT_ID="$( cat ${TMP}/svc-acct.json | jq -r '.tenantId' )"
# Login to azure
az login --service-principal --username ${AZURE_CLIENT_ID} --password ${AZURE_CLIENT_SECRET} --tenant ${AZURE_TENANT_ID} > /dev/null
set -x
# Untar image
tar -C ${TMP} -xf ${ARTIFACTS}/azure-amd64.tar.gz
# Get connection string
AZURE_STORAGE_CONNECTION_STRING=$(az storage account show-connection-string -n ${AZURE_STORAGE_ACCOUNT} -g ${AZURE_GROUP} -o tsv)
# Push blob
AZURE_STORAGE_CONNECTION_STRING="${AZURE_STORAGE_CONNECTION_STRING}" az storage blob upload --container-name ${AZURE_STORAGE_CONTAINER} -f ${TMP}/disk.vhd -n azure-${TAG}.vhd
# Delete image
az image delete --name talos-e2e-${TAG} -g ${AZURE_GROUP}
# Create image
az image create --name talos-e2e-${TAG} --source https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_STORAGE_CONTAINER}/azure-${TAG}.vhd --os-type linux -g ${AZURE_GROUP}
# Setup the cluster YAML.
sed "s/{{TAG}}/${TAG}/" ${PWD}/hack/test/manifests/azure-cluster.yaml > ${TMP}/cluster.yaml
}
setup
create_cluster_capi azure
run_talos_integration_test run_talos_integration_test
run_kubernetes_integration_test run_kubernetes_integration_test

22
hack/test/tfvars/azure.jq Normal file
View File

@ -0,0 +1,22 @@
{
"cluster_name": .cluster_name,
"talos_version_contract": .talos_version_contract,
"kubernetes_version": .kubernetes_version,
"azure_location": "eastus",
"control_plane": {
"vm_os_id": .vm_os_id,
"vm_size": "Standard_B2s"
},
"worker_groups": [
{
"name": "default",
"vm_os_id": .vm_os_id,
"vm_size": "Standard_B2s"
}
],
"extra_tags": {
"Cluster Name": .cluster_name,
"Project": "talos-e2e-ci",
"Environment": "ci"
}
}