chore: azure e2e

Add code to support azure e2e

Signed-off-by: Noel Georgi <git@frezbo.dev>
This commit is contained in:
Noel Georgi 2024-03-22 21:28:32 +05:30
parent 55dd41c0df
commit ee51f04af3
No known key found for this signature in database
GPG Key ID: 21A9F444075C9E36
10 changed files with 212 additions and 129 deletions

View File

@@ -739,16 +739,17 @@ local integration_pipelines = [
Pipeline('cron-integration-qemu-csi', default_pipeline_steps + [integration_qemu_csi], [default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-integration-images', default_pipeline_steps + [integration_images], [default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-integration-reproducibility-test', default_pipeline_steps + [integration_reproducibility_test], [default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-image-factory', default_pipeline_steps + [
integration_factory_16_iso,
integration_factory_16_image,
integration_factory_16_pxe,
integration_factory_16_secureboot,
integration_factory_15_iso,
integration_factory_13_iso,
integration_factory_13_image,
],
[default_cron_pipeline]) + cron_trigger(['nightly']),
Pipeline('cron-image-factory',
default_pipeline_steps + [
integration_factory_16_iso,
integration_factory_16_image,
integration_factory_16_pxe,
integration_factory_16_secureboot,
integration_factory_15_iso,
integration_factory_13_iso,
integration_factory_13_image,
],
[default_cron_pipeline]) + cron_trigger(['nightly']),
];
@ -835,11 +836,75 @@ local E2EAWS(target) =
targets;
local E2EAzure() =
local depends_on = [load_artifacts];
local e2e_azure_prepare = Step(
'e2e-azure-prepare',
depends_on=depends_on,
environment=creds_env_vars {
IMAGE_REGISTRY: local_registry,
},
extra_commands=[
'az login --service-principal -u "$${AZURE_CLIENT_ID}" -p "$${AZURE_CLIENT_SECRET}" --tenant "$${AZURE_TENANT_ID}"',
'az storage blob upload-batch --overwrite -s _out --pattern "e2e-azure-generated/*" -d "${CI_COMMIT_SHA}${DRONE_TAG//./-}"',
]
);
local tf_apply = TriggerDownstream(
'tf-apply',
'e2e-talos-tf-apply',
['siderolabs/contrib@main'],
params=[
'BUCKET_PATH=${CI_COMMIT_SHA}${DRONE_TAG//./-}',
'TYPE=azure',
],
depends_on=[e2e_azure_prepare],
);
local e2e_azure_tf_apply_post = Step(
'e2e-azure-download-artifacts',
with_make=false,
environment=creds_env_vars,
extra_commands=[
'az login --service-principal -u "$${AZURE_CLIENT_ID}" -p "$${AZURE_CLIENT_SECRET}" --tenant "$${AZURE_TENANT_ID}"',
'az storage blob download -f _out/e2e-azure-talosconfig -n e2e-azure-talosconfig -c ${CI_COMMIT_SHA}${DRONE_TAG//./-}',
'az storage blob download -f _out/e2e-azure-kubeconfig -n e2e-azure-kubeconfig -c ${CI_COMMIT_SHA}${DRONE_TAG//./-}',
],
depends_on=[tf_apply],
);
local e2e_azure = Step(
'e2e-azure',
depends_on=[e2e_azure_tf_apply_post],
environment=creds_env_vars {}
);
local tf_destroy = TriggerDownstream(
'tf-destroy',
'e2e-talos-tf-destroy',
['siderolabs/contrib@main'],
params=[
'BUCKET_PATH=${CI_COMMIT_SHA}${DRONE_TAG//./-}',
'TYPE=azure',
],
depends_on=[e2e_azure],
when={
status: [
'failure',
'success',
],
},
);
local targets = [e2e_azure_prepare, tf_apply, e2e_azure_tf_apply_post, e2e_azure, tf_destroy];
targets;
local e2e_aws = [step for step in E2EAWS('default')];
local e2e_aws_nvidia_oss = [step for step in E2EAWS('nvidia-oss')];
local e2e_azure = Step('e2e-azure', depends_on=[e2e_capi], environment=creds_env_vars);
local e2e_azure = [step for step in E2EAzure()];
local e2e_gcp = Step('e2e-gcp', depends_on=[e2e_capi], environment=creds_env_vars);
local e2e_trigger(names) = {
@ -854,10 +919,12 @@ local e2e_pipelines = [
// regular pipelines, triggered on promote events
Pipeline('e2e-aws', default_pipeline_steps + e2e_aws) + e2e_trigger(['e2e-aws']),
Pipeline('e2e-aws-nvidia-oss', default_pipeline_steps + e2e_aws_nvidia_oss) + e2e_trigger(['e2e-aws-nvidia-oss']),
Pipeline('e2e-azure', default_pipeline_steps + e2e_azure) + e2e_trigger(['e2e-azure']),
Pipeline('e2e-gcp', default_pipeline_steps + [capi_docker, e2e_capi, e2e_gcp]) + e2e_trigger(['e2e-gcp']),
// cron pipelines, triggered on schedule events
Pipeline('cron-e2e-aws', default_pipeline_steps + e2e_aws, [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']),
Pipeline('cron-e2e-azure', default_pipeline_steps + e2e_azure, [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']),
Pipeline('cron-e2e-gcp', default_pipeline_steps + [capi_docker, e2e_capi, e2e_gcp], [default_cron_pipeline]) + cron_trigger(['thrice-daily', 'nightly']),
];

View File

@ -370,7 +370,7 @@ image-%: ## Builds the specified image. Valid options are aws, azure, digital-oc
docker run --rm -t -v /dev:/dev -v $(PWD)/$(ARTIFACTS):/secureboot:ro -v $(PWD)/$(ARTIFACTS):/out --network=host --privileged $(REGISTRY_AND_USERNAME)/imager:$(IMAGE_TAG) $* --arch $$arch $(IMAGER_ARGS) ; \
done
images-essential: image-aws image-gcp image-metal secureboot-installer ## Builds only essential images used in the CI (AWS, GCP, and Metal).
images-essential: image-aws image-azure image-gcp image-metal secureboot-installer ## Builds only essential images used in the CI (AWS, GCP, and Metal).
images: image-aws image-azure image-digital-ocean image-exoscale image-gcp image-hcloud image-iso image-metal image-nocloud image-opennebula image-openstack image-oracle image-scaleway image-upcloud image-vmware image-vultr ## Builds all known images (AWS, Azure, DigitalOcean, Exoscale, GCP, HCloud, Metal, NoCloud, OpenNebula, Openstack, Oracle, Scaleway, UpCloud, Vultr and VMware).

View File

@ -4,4 +4,4 @@ set -e
cd hack/cloud-image-uploader
go run . --artifacts-path="../../${ARTIFACTS}" --tag="${TAG}" --abbrev-tag="${ABBREV_TAG}" "$@"
go run . --artifacts-path="../../${ARTIFACTS}" --tag="${TAG}" "$@"

View File

@ -27,6 +27,7 @@ import (
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/blang/semver/v4"
"github.com/siderolabs/gen/channel"
"github.com/siderolabs/gen/xslices"
"github.com/ulikunitz/xz"
"golang.org/x/sync/errgroup"
)
@ -47,29 +48,31 @@ var azureArchitectures = map[string]string{
type AzureUploader struct {
Options Options
preRelease bool
helper azureHelper
}
// extractVersion extracts the version number in the format of int.int.int for Azure and assigns to the Options.AzureTag value.
func (azu *AzureUploader) setVersion() error {
v, err := semver.ParseTolerant(azu.Options.AzureAbbrevTag)
v, err := semver.ParseTolerant(azu.Options.Tag)
if err != nil {
return err
}
versionCore := fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
if fmt.Sprintf("v%s", versionCore) != azu.Options.AzureAbbrevTag {
azu.helper.version = versionCore
azu.Options.AzureGalleryName = "SideroLabs"
if fmt.Sprintf("v%s", versionCore) != azu.Options.Tag {
azu.preRelease = true
azu.Options.AzureGalleryName = "SideroGalleryTest"
azu.Options.AzureCoreTag = versionCore
fmt.Println(azu.Options.AzureGalleryName)
} else {
azu.Options.AzureGalleryName = "SideroLabs"
azu.Options.AzureCoreTag = versionCore
fmt.Println(azu.Options.AzureGalleryName)
}
return err
log.Println("azure: using Azure Gallery:", azu.Options.AzureGalleryName)
return nil
}
// AzureGalleryUpload uploads the image to Azure.
@ -91,11 +94,13 @@ func (azu *AzureUploader) AzureGalleryUpload(ctx context.Context) error {
return fmt.Errorf("error setting default Azure credentials: %w", err)
}
log.Printf("azure: getting locations")
if len(azu.Options.AzureRegions) == 0 {
regions, err := azu.helper.getAzureLocations(ctx)
if err != nil {
return fmt.Errorf("azure: error setting default Azure credentials: %w", err)
}
err = azu.helper.getAzureLocations(ctx)
if err != nil {
return fmt.Errorf("azure: error setting default Azure credentials: %w", err)
azu.Options.AzureRegions = regions
}
// Upload blob
@ -245,14 +250,16 @@ uploadLoop:
}
func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch string) error {
targetRegions := make([]*armcompute.TargetRegion, 0, len(azu.helper.locations))
var targetRegions []*armcompute.TargetRegion
for _, region := range azu.helper.locations {
targetRegions = append(targetRegions, &armcompute.TargetRegion{
Name: to.Ptr(region.Name),
ExcludeFromLatest: to.Ptr(false),
RegionalReplicaCount: to.Ptr[int32](1),
StorageAccountType: to.Ptr(armcompute.StorageAccountTypeStandardLRS),
if !azu.preRelease {
targetRegions = xslices.Map(azu.Options.AzureRegions, func(region string) *armcompute.TargetRegion {
return &armcompute.TargetRegion{
Name: to.Ptr(region),
ExcludeFromLatest: to.Ptr(false),
RegionalReplicaCount: to.Ptr[int32](1),
StorageAccountType: to.Ptr(armcompute.StorageAccountTypeStandardLRS),
}
})
}
@ -265,8 +272,8 @@ func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch stri
}
for _, v := range page.Value {
if *v.Name == azu.Options.AzureCoreTag {
log.Printf("azure: image version exists for %s\n azure: removing old image version\n", *v.Name)
if *v.Name == azu.helper.version {
log.Printf("azure: image version exists for %s\n", *v.Name)
err = azu.deleteImageVersion(ctx, arch)
if err != nil {
@ -283,7 +290,7 @@ func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch stri
resourceGroupName,
azu.Options.AzureGalleryName,
fmt.Sprintf("talos-%s", azureArchitectures[arch]),
azu.Options.AzureCoreTag,
azu.helper.version,
armcompute.GalleryImageVersion{
Location: to.Ptr(defaultRegion),
Properties: &armcompute.GalleryImageVersionProperties{
@ -309,21 +316,34 @@ func (azu *AzureUploader) createAzureImageVersion(ctx context.Context, arch stri
return fmt.Errorf("azure: failed to create image version: %w", err)
}
_, err = poller.PollUntilDone(ctx, nil)
res, err := poller.PollUntilDone(ctx, nil)
if err != nil {
return fmt.Errorf("azure: failed to pull the result for image version creation: %w", err)
}
return err
for _, region := range azu.Options.AzureRegions {
pushResult(CloudImage{
Cloud: "azure",
Tag: azu.Options.Tag,
Region: region,
Arch: arch,
Type: "vhd",
ID: *res.ID,
})
}
return nil
}
func (azu *AzureUploader) deleteImageVersion(ctx context.Context, arch string) error {
log.Println("azure: removing old image version")
poller, err := azu.helper.clientFactory.NewGalleryImageVersionsClient().BeginDelete(
ctx,
resourceGroupName,
azu.Options.AzureGalleryName,
fmt.Sprintf("talos-%s", azureArchitectures[arch]),
azu.Options.AzureCoreTag,
azu.helper.version,
nil)
if err != nil {
return fmt.Errorf("azure: failed to delete image: %w", err)
@ -334,16 +354,16 @@ func (azu *AzureUploader) deleteImageVersion(ctx context.Context, arch string) e
return fmt.Errorf("azure: failed to pull the result for image deletion: %w", err)
}
return err
return nil
}
type azureHelper struct {
version string
subscriptionID string
clientFactory *armcompute.ClientFactory
cred *azidentity.DefaultAzureCredential
authorizer autorest.Authorizer
providersClient resources.ProvidersClient
locations map[string]Location
}
func (helper *azureHelper) setDefaultAzureCreds() error {
@ -385,34 +405,21 @@ func (helper *azureHelper) setDefaultAzureCreds() error {
return nil
}
//nolint:gocyclo
func (helper *azureHelper) getAzureLocations(ctx context.Context) error {
providers, err := helper.listProviders(ctx)
func (helper *azureHelper) getAzureLocations(ctx context.Context) ([]string, error) {
var regions []string
result, err := helper.providersClient.Get(ctx, "Microsoft.Compute", "")
if err != nil {
return err
return nil, fmt.Errorf("azure: error getting Microsoft.Compute: %w", err)
}
var computeProvider resources.Provider
for _, provider := range providers {
if provider.Namespace != nil && *provider.Namespace == "Microsoft.Compute" {
computeProvider = provider
break
}
}
helper.locations = make(map[string]Location)
if computeProvider.ResourceTypes != nil {
for _, rt := range *computeProvider.ResourceTypes {
if result.ResourceTypes != nil {
for _, rt := range *result.ResourceTypes {
if rt.ResourceType != nil && *rt.ResourceType == "virtualMachines" {
if rt.Locations != nil {
for _, region := range *rt.Locations {
abbr := strings.ReplaceAll(region, " ", "")
abbr = strings.ToLower(abbr)
helper.locations[abbr] = Location{Abbreviation: abbr, Name: region}
}
regions = xslices.Map(*rt.Locations, func(s string) string {
return strings.ToLower(strings.ReplaceAll(s, " ", ""))
})
}
break
@ -420,17 +427,5 @@ func (helper *azureHelper) getAzureLocations(ctx context.Context) error {
}
}
return err
}
func (helper *azureHelper) listProviders(ctx context.Context) (result []resources.Provider, err error) {
for list, err := helper.providersClient.List(ctx, ""); list.NotDone(); err = list.NextWithContext(ctx) {
if err != nil {
return nil, fmt.Errorf("azure: error getting providers list: %v", err)
}
result = append(result, list.Values()...)
}
return
return regions, nil
}

View File

@ -54,19 +54,14 @@ func main() {
func run() error {
var err error
DefaultOptions.AWSRegions, err = GetAWSDefaultRegions()
if err != nil {
log.Printf("failed to get a list of enabled AWS regions: %s, ignored", err)
}
pflag.StringSliceVar(&DefaultOptions.TargetClouds, "target-clouds", DefaultOptions.TargetClouds, "cloud targets to upload to")
pflag.StringSliceVar(&DefaultOptions.Architectures, "architectures", DefaultOptions.Architectures, "list of architectures to process")
pflag.StringVar(&DefaultOptions.ArtifactsPath, "artifacts-path", DefaultOptions.ArtifactsPath, "artifacts path")
pflag.StringVar(&DefaultOptions.Tag, "tag", DefaultOptions.Tag, "tag (version) of the uploaded image")
pflag.StringVar(&DefaultOptions.AzureAbbrevTag, "abbrev-tag", DefaultOptions.AzureAbbrevTag, "abbreviated tag (version) of the uploaded image")
pflag.StringVar(&DefaultOptions.NamePrefix, "name-prefix", DefaultOptions.NamePrefix, "prefix for the name of the uploaded image")
pflag.StringSliceVar(&DefaultOptions.AWSRegions, "aws-regions", DefaultOptions.AWSRegions, "list of AWS regions to upload to")
pflag.StringSliceVar(&DefaultOptions.AzureRegions, "azure-regions", DefaultOptions.AzureRegions, "list of Azure regions to upload to")
pflag.Parse()
@ -86,6 +81,13 @@ func run() error {
switch target {
case "aws":
g.Go(func() error {
if len(DefaultOptions.AWSRegions) == 0 {
DefaultOptions.AWSRegions, err = GetAWSDefaultRegions()
if err != nil {
log.Printf("failed to get a list of enabled AWS regions: %s, ignored", err)
}
}
aws := AWSUploader{
Options: DefaultOptions,
}

View File

@ -21,19 +21,11 @@ type Options struct {
AWSRegions []string
// Azure options.
AzureRegions []Location
AzureCoreTag string
AzureAbbrevTag string
AzureRegions []string
AzureGalleryName string
AzurePreRelease string
}
// Location is the struct for the Azure Regions Options.
type Location struct {
Abbreviation string
Name string
}
// DefaultOptions used throughout the cli.
var DefaultOptions = Options{
ArtifactsPath: "_out/",

View File

@ -13,7 +13,7 @@ function cloud_image_upload() {
}
function get_ami_id() {
jq -r ".[] | select(.region == \"${REGION}\") | select (.arch == \"amd64\") | .id" "${ARTIFACTS}/cloud-images.json"
jq -r ".[] | select(.cloud == \"aws\") | select(.region == \"${REGION}\") | select (.arch == \"amd64\") | .id" "${ARTIFACTS}/cloud-images.json"
}
function cloud_image_upload_with_extensions() {

35
hack/test/e2e-azure-prepare.sh Executable file
View File

@@ -0,0 +1,35 @@
#!/usr/bin/env bash
set -eou pipefail
source ./hack/test/e2e.sh
REGION="eastus"
function cloud_image_upload() {
CLOUD_IMAGES_EXTRA_ARGS=("--target-clouds=azure" "--architectures=amd64" "--azure-regions=${REGION}")
make cloud-images CLOUD_IMAGES_EXTRA_ARGS="${CLOUD_IMAGES_EXTRA_ARGS[*]}"
}
function get_os_id() {
jq -r ".[] | select(.cloud == \"azure\") | select(.region == \"${REGION}\") | select (.arch == \"amd64\") | .id" "${ARTIFACTS}/cloud-images.json"
}
cloud_image_upload
VM_OS_ID=$(get_os_id)
mkdir -p "${ARTIFACTS}/e2e-azure-generated"
NAME_PREFIX="talos-e2e-${SHA}-azure"
jq --null-input \
--arg VM_OS_ID "${VM_OS_ID}" \
--arg CLUSTER_NAME "${NAME_PREFIX}" \
--arg TALOS_VERSION_CONTRACT "${TALOS_VERSION}" \
--arg KUBERNETES_VERSION "${KUBERNETES_VERSION}" \
'{vm_os_id: $VM_OS_ID, cluster_name: $CLUSTER_NAME, talos_version_contract: $TALOS_VERSION_CONTRACT, kubernetes_version: $KUBERNETES_VERSION}' \
| jq -f hack/test/tfvars/azure.jq > "${ARTIFACTS}/e2e-azure-generated/vars.json"
cp hack/test/tfvars/*.yaml "${ARTIFACTS}/e2e-azure-generated"

View File

@@ -4,42 +4,12 @@ set -eou pipefail
source ./hack/test/e2e.sh
function setup {
AZURE_STORAGE_ACCOUNT=talostesting
AZURE_STORAGE_CONTAINER=talostesting
AZURE_GROUP=talos
cp "${ARTIFACTS}/e2e-azure-talosconfig" "${TALOSCONFIG}"
cp "${ARTIFACTS}/e2e-azure-kubeconfig" "${KUBECONFIG}"
# Setup svc acct vars
set +x
echo ${AZURE_SVC_ACCT} | base64 -d > ${TMP}/svc-acct.json
AZURE_CLIENT_ID="$( cat ${TMP}/svc-acct.json | jq -r '.clientId' )"
AZURE_CLIENT_SECRET="$( cat ${TMP}/svc-acct.json | jq -r '.clientSecret' )"
AZURE_TENANT_ID="$( cat ${TMP}/svc-acct.json | jq -r '.tenantId' )"
# set the talosconfig to use the first controlplane ip
CONTROLPLANE0_NODE=$(${TALOSCTL} config info -o json | jq -r '.endpoints[0]')
${TALOSCTL} config node "${CONTROLPLANE0_NODE}"
# Login to azure
az login --service-principal --username ${AZURE_CLIENT_ID} --password ${AZURE_CLIENT_SECRET} --tenant ${AZURE_TENANT_ID} > /dev/null
set -x
# Untar image
tar -C ${TMP} -xf ${ARTIFACTS}/azure-amd64.tar.gz
# Get connection string
AZURE_STORAGE_CONNECTION_STRING=$(az storage account show-connection-string -n ${AZURE_STORAGE_ACCOUNT} -g ${AZURE_GROUP} -o tsv)
# Push blob
AZURE_STORAGE_CONNECTION_STRING="${AZURE_STORAGE_CONNECTION_STRING}" az storage blob upload --container-name ${AZURE_STORAGE_CONTAINER} -f ${TMP}/disk.vhd -n azure-${TAG}.vhd
# Delete image
az image delete --name talos-e2e-${TAG} -g ${AZURE_GROUP}
# Create image
az image create --name talos-e2e-${TAG} --source https://${AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${AZURE_STORAGE_CONTAINER}/azure-${TAG}.vhd --os-type linux -g ${AZURE_GROUP}
# Setup the cluster YAML.
sed "s/{{TAG}}/${TAG}/" ${PWD}/hack/test/manifests/azure-cluster.yaml > ${TMP}/cluster.yaml
}
setup
create_cluster_capi azure
run_talos_integration_test
run_kubernetes_integration_test

22
hack/test/tfvars/azure.jq Normal file
View File

@@ -0,0 +1,22 @@
{
"cluster_name": .cluster_name,
"talos_version_contract": .talos_version_contract,
"kubernetes_version": .kubernetes_version,
"azure_location": "eastus",
"control_plane": {
"vm_os_id": .vm_os_id,
"vm_size": "Standard_B2s"
},
"worker_groups": [
{
"name": "default",
"vm_os_id": .vm_os_id,
"vm_size": "Standard_B2s"
}
],
"extra_tags": {
"Cluster Name": .cluster_name,
"Project": "talos-e2e-ci",
"Environment": "ci"
},
}