chore: build arm64 images in CI

This changes the installer image/ISO output to optionally be a tar archive
streamed via stdout, so that we can copy artifacts back from a remote
Docker daemon.

Fixes #2776

Signed-off-by: Andrey Smirnov <smirnov.andrey@gmail.com>
Andrey Smirnov 2020-11-13 17:17:07 +03:00 committed by talos-bot
parent 16c5fa64f6
commit 61facf700a
15 changed files with 80 additions and 34 deletions
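
For context, a minimal sketch of the pattern this change enables (the remote daemon address mirrors the CI config below, and the image reference reuses the Makefile's variables; both are illustrative, not prescriptive):

```bash
# Point the Docker CLI at a remote daemon, e.g. the arm64 builder used in CI.
export DOCKER_HOST=tcp://docker-arm64.ci.svc:2376

# The installer tars its output directory to stdout instead of writing into a
# bind-mounted volume, so the artifacts can be extracted locally even though
# the container ran on the remote daemon.
docker run --rm -v /dev:/dev --privileged \
  "$REGISTRY/$USERNAME/installer:$TAG" image --platform gcp --tar-to-stdout \
  | tar xz -C "$ARTIFACTS"
```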


@@ -215,8 +215,10 @@ local installer = Step("installer", depends_on=[initramfs], environment={"REGIST
local talos = Step("talos", depends_on=[installer], environment={"REGISTRY": local_registry, "PUSH": true});
local lint = Step("lint", depends_on=[check_dirty]);
local talosctl_cni_bundle = Step('talosctl-cni-bundle', depends_on=[lint]);
local images = Step("images", depends_on=[installer], environment={"REGISTRY": local_registry});
local iso = Step('iso', depends_on=[images], environment={"REGISTRY": local_registry});
local images_amd64 = Step("images-amd64", target="images", depends_on=[installer], environment={"REGISTRY": local_registry});
local images_arm64 = Step("images-arm64", target="images", depends_on=[installer], environment={"REGISTRY": local_registry, "DOCKER_HOST": "tcp://docker-arm64.ci.svc:2376"});
local iso_amd64 = Step("iso-amd64", target="iso", depends_on=[images_amd64, images_arm64], environment={"REGISTRY": local_registry});
local iso_arm64 = Step("iso-arm64", target="iso", depends_on=[images_amd64, images_arm64], environment={"REGISTRY": local_registry, "DOCKER_HOST": "tcp://docker-arm64.ci.svc:2376"});
local unit_tests = Step("unit-tests", depends_on=[initramfs]);
local unit_tests_race = Step("unit-tests-race", depends_on=[initramfs]);
local e2e_docker = Step("e2e-docker-short", depends_on=[talos, talosctl_linux, unit_tests, unit_tests_race], target="e2e-docker", environment={"SHORT_INTEGRATION_TEST": "yes", "REGISTRY": local_registry});
@@ -295,8 +297,10 @@ local default_steps = [
talos,
lint,
talosctl_cni_bundle,
images,
iso,
images_amd64,
images_arm64,
iso_amd64,
iso_arm64,
unit_tests,
unit_tests_race,
coverage,
@@ -479,22 +483,28 @@ local release = {
draft: true,
note: '_out/RELEASE_NOTES.md',
files: [
'_out/aws.tar.gz',
'_out/azure.tar.gz',
'_out/aws-amd64.tar.gz',
'_out/aws-arm64.tar.gz',
'_out/azure-amd64.tar.gz',
'_out/azure-arm64.tar.gz',
'_out/boot-amd64.tar.gz',
'_out/boot-arm64.tar.gz',
'_out/digital-ocean.tar.gz',
'_out/gcp.tar.gz',
'_out/digital-ocean-amd64.tar.gz',
'_out/digital-ocean-arm64.tar.gz',
'_out/gcp-amd64.tar.gz',
'_out/gcp-arm64.tar.gz',
'_out/initramfs-amd64.xz',
'_out/initramfs-arm64.xz',
'_out/talos-amd64.iso',
'_out/talos-arm64.iso',
'_out/talosctl-cni-bundle-amd64.tar.gz',
'_out/talosctl-cni-bundle-arm64.tar.gz',
'_out/talosctl-darwin-amd64',
'_out/talosctl-linux-amd64',
'_out/talosctl-linux-arm64',
'_out/talosctl-linux-armv7',
'_out/vmware.ova',
'_out/vmware-amd64.ova',
'_out/vmware-arm64.ova',
'_out/vmlinuz-amd64',
'_out/vmlinuz-arm64',
],
@@ -503,7 +513,7 @@ local release = {
when: {
event: ['tag'],
},
depends_on: [kernel.name, iso.name, boot.name, talosctl_cni_bundle.name, images.name, push.name, release_notes.name]
depends_on: [kernel.name, boot.name, talosctl_cni_bundle.name, images_amd64.name, images_arm64.name, iso_amd64.name, iso_arm64.name, push.name, release_notes.name]
};
local release_steps = default_steps + [


@@ -171,13 +171,13 @@ talosctl-%:
talosctl: $(TALOSCTL_DEFAULT_TARGET) ## Builds the talosctl binary for the local machine.
image-%: ## Builds the specified image. Valid options are aws, azure, digital-ocean, gcp, and vmware (e.g. image-aws)
@docker run --rm -v /dev:/dev -v $(PWD)/$(ARTIFACTS):/out --privileged $(REGISTRY)/$(USERNAME)/installer:$(TAG) image --platform $*
@docker run --rm -v /dev:/dev --privileged $(REGISTRY)/$(USERNAME)/installer:$(TAG) image --platform $* --tar-to-stdout | tar xz -C $(ARTIFACTS)
images: image-aws image-azure image-digital-ocean image-gcp image-vmware ## Builds all known images (AWS, Azure, Digital Ocean, GCP, and VMware).
.PHONY: iso
iso: ## Builds the ISO and outputs it to the artifact directory.
@docker run --rm -i -v $(PWD)/$(ARTIFACTS):/out $(REGISTRY)/$(USERNAME)/installer:$(TAG) iso
@docker run --rm -i $(REGISTRY)/$(USERNAME)/installer:$(TAG) iso --tar-to-stdout | tar xz -C $(ARTIFACTS)
.PHONY: boot
boot: ## Creates a compressed tarball that includes vmlinuz-{amd64,arm64} and initramfs-{amd64,arm64}.xz. Note that these files must already be present in the artifacts directory.
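
Since these recipes no longer rely on a bind-mounted output directory, the same targets can be pointed at a remote builder by exporting `DOCKER_HOST`, which is how the arm64 CI steps are wired; a rough, illustrative invocation (the registry value is a placeholder):

```bash
# amd64 build against the local daemon.
make images iso REGISTRY=127.0.0.1:5000

# arm64 build against the remote arm64 daemon; the tar-over-stdout pipe still
# drops the artifacts into the local artifacts directory.
DOCKER_HOST=tcp://docker-arm64.ci.svc:2376 make images iso REGISTRY=127.0.0.1:5000
```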


@@ -5,8 +5,12 @@
package cmd
import (
"context"
"fmt"
"log"
"os"
"path/filepath"
stdruntime "runtime"
"strings"
"github.com/spf13/cobra"
@@ -17,11 +21,15 @@ import (
"github.com/talos-systems/talos/cmd/installer/pkg/qemuimg"
"github.com/talos-systems/talos/internal/app/machined/pkg/runtime"
"github.com/talos-systems/talos/internal/app/machined/pkg/runtime/v1alpha1/platform"
"github.com/talos-systems/talos/pkg/archiver"
"github.com/talos-systems/talos/pkg/cmd"
"github.com/talos-systems/talos/pkg/machinery/constants"
)
var outputArg string
var (
outputArg string
tarToStdout bool
)
// imageCmd represents the image command.
var imageCmd = &cobra.Command{
@@ -37,6 +45,7 @@ var imageCmd = &cobra.Command{
func init() {
imageCmd.Flags().StringVar(&outputArg, "output", "/out", "The output path")
imageCmd.Flags().BoolVar(&tarToStdout, "tar-to-stdout", false, "Tar output and send to stdout")
rootCmd.AddCommand(imageCmd)
}
@@ -47,6 +56,10 @@ func runImageCmd() (err error) {
return err
}
if err = os.MkdirAll(outputArg, 0o777); err != nil {
return err
}
log.Printf("creating image for %s", p.Name())
log.Print("creating RAW disk")
@@ -88,6 +101,12 @@ func runImageCmd() (err error) {
return err
}
if tarToStdout {
if err := tarOutput(); err != nil {
return err
}
}
return nil
}
@@ -100,7 +119,7 @@ func finalize(platform runtime.Platform, img string) (err error) {
switch platform.Name() {
case "aws":
if err = tar("aws.tar.gz", file, dir); err != nil {
if err = tar(fmt.Sprintf("aws-%s.tar.gz", stdruntime.GOARCH), file, dir); err != nil {
return err
}
case "azure":
@@ -110,15 +129,15 @@ func finalize(platform runtime.Platform, img string) (err error) {
return err
}
if err = tar("azure.tar.gz", file, dir); err != nil {
if err = tar(fmt.Sprintf("azure-%s.tar.gz", stdruntime.GOARCH), file, dir); err != nil {
return err
}
case "digital-ocean":
if err = tar("digital-ocean.tar.gz", file, dir); err != nil {
if err = tar(fmt.Sprintf("digital-ocean-%s.tar.gz", stdruntime.GOARCH), file, dir); err != nil {
return err
}
case "gcp":
if err = tar("gcp.tar.gz", file, dir); err != nil {
if err = tar(fmt.Sprintf("gcp-%s.tar.gz", stdruntime.GOARCH), file, dir); err != nil {
return err
}
case "vmware":
@@ -137,3 +156,7 @@ func tar(filename, src, dir string) error {
return nil
}
func tarOutput() error {
return archiver.TarGz(context.Background(), outputArg, os.Stdout)
}


@@ -46,11 +46,17 @@ var isoCmd = &cobra.Command{
}
func init() {
isoCmd.Flags().StringVar(&outputArg, "output", "/out", "The output path")
isoCmd.Flags().BoolVar(&tarToStdout, "tar-to-stdout", false, "Tar output and send to stdout")
rootCmd.AddCommand(isoCmd)
}
// nolint: gocyclo
func runISOCmd() error {
if err := os.MkdirAll(outputArg, 0o777); err != nil {
return err
}
files := map[string]string{
"/usr/install/vmlinuz": "/mnt/boot/vmlinuz",
"/usr/install/initramfs.xz": "/mnt/boot/initramfs.xz",
@@ -122,5 +128,11 @@ func runISOCmd() error {
return err
}
if tarToStdout {
if err := tarOutput(); err != nil {
return err
}
}
return nil
}


@@ -12,6 +12,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"text/template"
@@ -184,7 +185,7 @@ func CreateOVAFromRAW(name, src, out string) (err error) {
return err
}
if _, err = cmd.Run("tar", "-cvf", filepath.Join(out, "vmware.ova"), "-C", dir, name+".ovf", name+".mf", name+".vmdk"); err != nil {
if _, err = cmd.Run("tar", "-cvf", filepath.Join(out, fmt.Sprintf("vmware-%s.ova", runtime.GOARCH)), "-C", dir, name+".ovf", name+".mf", name+".vmdk"); err != nil {
return err
}


@@ -57,7 +57,7 @@ kind: TalosControlPlane
metadata:
name: talos-e2e-{{TAG}}-aws-controlplane
spec:
version: v1.19.4
version: v1.19.3
replicas: 3
infrastructureTemplate:
kind: AWSMachineTemplate
@@ -111,7 +111,7 @@ spec:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: AWSMachineTemplate
name: talos-e2e-{{TAG}}-aws-workers
version: 1.19.4
version: 1.19.3
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: AWSMachineTemplate


@@ -44,7 +44,7 @@ kind: TalosControlPlane
metadata:
name: talos-e2e-{{TAG}}-gcp-controlplane
spec:
version: v1.19.4
version: v1.19.3
replicas: 3
infrastructureTemplate:
kind: GCPMachineTemplate
@@ -102,7 +102,7 @@ spec:
kind: GCPMachineTemplate
name: talos-e2e-{{TAG}}-gcp-workers
namespace: default
version: 1.19.4
version: 1.19.3
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: GCPMachineTemplate


@@ -12,7 +12,7 @@ function setup {
mkdir -p ${TMP}
# Untar image
tar -C ${TMP} -xf ${ARTIFACTS}/aws.tar.gz
tar -C ${TMP} -xf ${ARTIFACTS}/aws-amd64.tar.gz
# Upload Image
echo "uploading image to s3"


@@ -19,9 +19,9 @@ function setup {
# Login to azure
az login --service-principal --username ${AZURE_CLIENT_ID} --password ${AZURE_CLIENT_SECRET} --tenant ${AZURE_TENANT_ID} > /dev/null
set -x
# Untar image
tar -C ${TMP} -xf ${ARTIFACTS}/azure.tar.gz
tar -C ${TMP} -xf ${ARTIFACTS}/azure-amd64.tar.gz
# Get connection string
AZURE_STORAGE_CONNECTION_STRING=$(az storage account show-connection-string -n ${AZURE_STORAGE_ACCOUNT} -g ${AZURE_GROUP} -o tsv)


@@ -9,8 +9,8 @@ function setup {
echo ${GCE_SVC_ACCT} | base64 -d > ${TMP}/svc-acct.json
gcloud auth activate-service-account --key-file ${TMP}/svc-acct.json
set -x
gsutil cp ${ARTIFACTS}/gcp.tar.gz gs://talos-e2e/gcp-${SHA}.tar.gz
gsutil cp ${ARTIFACTS}/gcp-amd64.tar.gz gs://talos-e2e/gcp-${SHA}.tar.gz
gcloud --quiet --project talos-testbed compute images delete talos-e2e-${SHA} || true
gcloud --quiet --project talos-testbed compute images create talos-e2e-${SHA} --source-uri gs://talos-e2e/gcp-${SHA}.tar.gz
sed -e "s/{{TAG}}/${SHA}/" ${PWD}/hack/test/capi/cluster-gcp.yaml > ${TMP}/cluster.yaml


@@ -134,7 +134,7 @@ doctl compute droplet create \
### Retrieve the `kubeconfig`
To configure `talosctl` we will need the first controla plane node's IP:
To configure `talosctl` we will need the first control plane node's IP:
```bash
doctl compute droplet get --format PublicIPv4 <droplet ID>


@@ -41,7 +41,7 @@ Note that the role should be associated with the S3 bucket we created above.
First, download the AWS image from a Talos release:
```bash
curl -LO https://github.com/talos-systems/talos/releases/latest/download/aws.tar.gz | tar -xv
curl -LO https://github.com/talos-systems/talos/releases/latest/download/aws-amd64.tar.gz | tar -xv
```
Copy the RAW disk to S3 and import it as a snapshot:


@@ -36,7 +36,7 @@ export CONNECTION=$(az storage account show-connection-string \
### Create the Image
First, download the Azure image from a [Talos release](https://github.com/talos-systems/talos/releases).
Once downloaded, untar with `tar -xvf /path/to/azure.tar.gz`
Once downloaded, untar with `tar -xvf /path/to/azure-amd64.tar.gz`
#### Upload the VHD


@@ -134,7 +134,7 @@ doctl compute droplet create \
### Retrieve the `kubeconfig`
To configure `talosctl` we will need the first controla plane node's IP:
To configure `talosctl` we will need the first control plane node's IP:
```bash
doctl compute droplet get --format PublicIPv4 <droplet ID>


@@ -23,14 +23,14 @@ export REGION="us-central1"
### Create the Image
First, download the Google Cloud image from a Talos [release](https://github.com/talos-systems/talos/releases).
These images are called `gcp.tar.gz`.
These images are called `gcp-$ARCH.tar.gz`.
#### Upload the Image
Once you have downloaded the image, you can upload it to your storage bucket with:
```bash
gsutil cp /path/to/gcp.tar.gz gs://$STORAGE_BUCKET
gsutil cp /path/to/gcp-amd64.tar.gz gs://$STORAGE_BUCKET
```
#### Register the image
@@ -39,7 +39,7 @@ Now that the image is present in our bucket, we'll register it.
```bash
gcloud compute images create talos \
--source-uri=gs://$STORAGE_BUCKET/gcp.tar.gz \
--source-uri=gs://$STORAGE_BUCKET/gcp-amd64.tar.gz \
--guest-os-features=VIRTIO_SCSI_MULTIQUEUE
```