chore(ci): e2e gcp

Add e2e-gcp.

Also drop other CAPI stuff.

Fixes: #8842

Signed-off-by: Noel Georgi <git@frezbo.dev>
This commit is contained in:
Noel Georgi 2024-09-05 21:55:57 +05:30
parent cd7c682662
commit d64ce44e46
No known key found for this signature in database
GPG Key ID: 21A9F444075C9E36
45 changed files with 409 additions and 966 deletions

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-09-05T10:32:02Z by kres b5ca957.
# Generated on 2024-09-06T05:05:58Z by kres 8be5fa7.
name: default
concurrency:
@ -410,7 +410,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"
@ -536,7 +536,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"
@ -686,7 +686,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"
@ -836,7 +836,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"
@ -1087,7 +1087,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"
@ -1261,7 +1261,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"
@ -1440,6 +1440,129 @@ jobs:
~/.talos/clusters/**/*.log
!~/.talos/clusters/**/swtpm.log
retention-days: "5"
integration-gcp:
permissions:
actions: read
contents: write
issues: read
packages: write
pull-requests: read
runs-on:
- self-hosted
- generic
if: contains(fromJSON(needs.default.outputs.labels), 'integration/gcp')
needs:
- default
steps:
- name: gather-system-info
id: system-info
uses: kenchan0130/actions-system-info@v1.3.0
continue-on-error: true
- name: print-system-info
run: |
MEMORY_GB=$((${{ steps.system-info.outputs.totalmem }}/1024/1024/1024))
OUTPUTS=(
"CPU Core: ${{ steps.system-info.outputs.cpu-core }}"
"CPU Model: ${{ steps.system-info.outputs.cpu-model }}"
"Hostname: ${{ steps.system-info.outputs.hostname }}"
"NodeName: ${NODE_NAME}"
"Kernel release: ${{ steps.system-info.outputs.kernel-release }}"
"Kernel version: ${{ steps.system-info.outputs.kernel-version }}"
"Name: ${{ steps.system-info.outputs.name }}"
"Platform: ${{ steps.system-info.outputs.platform }}"
"Release: ${{ steps.system-info.outputs.release }}"
"Total memory: ${MEMORY_GB} GB"
)
for OUTPUT in "${OUTPUTS[@]}";do
echo "${OUTPUT}"
done
continue-on-error: true
- name: checkout
uses: actions/checkout@v4
- name: Unshallow
run: |
git fetch --prune --unshallow
- name: Set up Docker Buildx
id: setup-buildx
uses: docker/setup-buildx-action@v3
with:
driver: remote
endpoint: tcp://buildkit-amd64.ci.svc.cluster.local:1234
timeout-minutes: 10
- name: Mask secrets
run: |
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"
- name: Download artifacts
if: github.event_name != 'schedule'
uses: actions/download-artifact@v4
with:
name: talos-artifacts
path: _out
- name: Fix artifact permissions
if: github.event_name != 'schedule'
run: |
xargs -a _out/executable-artifacts -I {} chmod +x {}
- name: uki-certs
if: github.event_name == 'schedule'
env:
PLATFORM: linux/amd64
run: |
make uki-certs
- name: build
if: github.event_name == 'schedule'
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
PLATFORM: linux/amd64
PUSH: "true"
run: |
make talosctl-linux-amd64 kernel sd-boot sd-stub initramfs installer imager talos _out/integration-test-linux-amd64
- name: talosctl-cni-bundle
if: github.event_name == 'schedule'
run: |
make talosctl-cni-bundle
- name: images-essential
if: github.event_name == 'schedule'
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
PLATFORM: linux/amd64
run: |
make images-essential
- name: e2e-gcp-prepare
run: |
make e2e-gcp-prepare
- name: checkout contrib
uses: actions/checkout@v4
with:
path: _out/contrib
ref: main
repository: siderolabs/contrib
- name: setup tf
uses: hashicorp/setup-terraform@v3
with:
terraform_wrapper: "false"
- name: tf apply
env:
TF_E2E_ACTION: apply
TF_E2E_TEST_TYPE: gcp
TF_SCRIPT_DIR: _out/contrib
run: |
make e2e-cloud-tf
- name: e2e-gcp
run: |
make e2e-gcp
- name: tf destroy
if: always()
env:
TF_E2E_ACTION: destroy
TF_E2E_TEST_TYPE: gcp
TF_SCRIPT_DIR: _out/contrib
run: |
make e2e-cloud-tf
integration-image-factory:
permissions:
actions: read
@ -3277,7 +3400,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-05-27T16:20:10Z by kres bcb280a.
# Generated on 2024-09-05T17:53:07Z by kres 8be5fa7.
name: integration-aws-cron
concurrency:
@ -54,7 +54,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-08-09T09:42:44Z by kres dbf015a.
# Generated on 2024-09-05T17:53:07Z by kres 8be5fa7.
name: integration-aws-nvidia-nonfree-cron
concurrency:
@ -54,7 +54,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-08-09T09:42:44Z by kres dbf015a.
# Generated on 2024-09-05T17:53:07Z by kres 8be5fa7.
name: integration-aws-nvidia-oss-cron
concurrency:
@ -54,7 +54,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-05-27T16:20:10Z by kres bcb280a.
# Generated on 2024-09-05T17:53:07Z by kres 8be5fa7.
name: integration-azure-cron
concurrency:
@ -54,7 +54,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-05-27T16:20:10Z by kres bcb280a.
# Generated on 2024-09-05T17:53:07Z by kres 8be5fa7.
name: integration-cloud-images-cron
concurrency:
@ -54,7 +54,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-05-27T16:20:10Z by kres bcb280a.
# Generated on 2024-09-05T17:53:07Z by kres 8be5fa7.
name: integration-equinix-metal-cron
concurrency:
@ -54,7 +54,7 @@ jobs:
timeout-minutes: 10
- name: Mask secrets
run: |
echo -e "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"

View File

@ -0,0 +1,126 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-09-06T05:05:58Z by kres 8be5fa7.
name: integration-gcp-cron
concurrency:
group: ${{ github.head_ref || github.run_id }}
cancel-in-progress: true
"on":
schedule:
- cron: 30 7 * * *
jobs:
default:
runs-on:
- self-hosted
- generic
steps:
- name: gather-system-info
id: system-info
uses: kenchan0130/actions-system-info@v1.3.0
continue-on-error: true
- name: print-system-info
run: |
MEMORY_GB=$((${{ steps.system-info.outputs.totalmem }}/1024/1024/1024))
OUTPUTS=(
"CPU Core: ${{ steps.system-info.outputs.cpu-core }}"
"CPU Model: ${{ steps.system-info.outputs.cpu-model }}"
"Hostname: ${{ steps.system-info.outputs.hostname }}"
"NodeName: ${NODE_NAME}"
"Kernel release: ${{ steps.system-info.outputs.kernel-release }}"
"Kernel version: ${{ steps.system-info.outputs.kernel-version }}"
"Name: ${{ steps.system-info.outputs.name }}"
"Platform: ${{ steps.system-info.outputs.platform }}"
"Release: ${{ steps.system-info.outputs.release }}"
"Total memory: ${MEMORY_GB} GB"
)
for OUTPUT in "${OUTPUTS[@]}";do
echo "${OUTPUT}"
done
continue-on-error: true
- name: checkout
uses: actions/checkout@v4
- name: Unshallow
run: |
git fetch --prune --unshallow
- name: Set up Docker Buildx
id: setup-buildx
uses: docker/setup-buildx-action@v3
with:
driver: remote
endpoint: tcp://buildkit-amd64.ci.svc.cluster.local:1234
timeout-minutes: 10
- name: Mask secrets
run: |
echo "$(sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | "::add-mask::" + .value')"
- name: Set secrets for job
run: |
sops -d .secrets.yaml | yq -e '.secrets | to_entries[] | .key + "=" + .value' >> "$GITHUB_ENV"
- name: Download artifacts
if: github.event_name != 'schedule'
uses: actions/download-artifact@v4
with:
name: talos-artifacts
path: _out
- name: Fix artifact permissions
if: github.event_name != 'schedule'
run: |
xargs -a _out/executable-artifacts -I {} chmod +x {}
- name: uki-certs
if: github.event_name == 'schedule'
env:
PLATFORM: linux/amd64
run: |
make uki-certs
- name: build
if: github.event_name == 'schedule'
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
PLATFORM: linux/amd64
PUSH: "true"
run: |
make talosctl-linux-amd64 kernel sd-boot sd-stub initramfs installer imager talos _out/integration-test-linux-amd64
- name: talosctl-cni-bundle
if: github.event_name == 'schedule'
run: |
make talosctl-cni-bundle
- name: images-essential
if: github.event_name == 'schedule'
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
PLATFORM: linux/amd64
run: |
make images-essential
- name: e2e-gcp-prepare
run: |
make e2e-gcp-prepare
- name: checkout contrib
uses: actions/checkout@v4
with:
path: _out/contrib
ref: main
repository: siderolabs/contrib
- name: setup tf
uses: hashicorp/setup-terraform@v3
with:
terraform_wrapper: "false"
- name: tf apply
env:
TF_E2E_ACTION: apply
TF_E2E_TEST_TYPE: gcp
TF_SCRIPT_DIR: _out/contrib
run: |
make e2e-cloud-tf
- name: e2e-gcp
run: |
make e2e-gcp
- name: tf destroy
if: always()
env:
TF_E2E_ACTION: destroy
TF_E2E_TEST_TYPE: gcp
TF_SCRIPT_DIR: _out/contrib
run: |
make e2e-cloud-tf

View File

@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-08-24T17:38:15Z by kres 8e4bbb4.
# Generated on 2024-09-05T16:23:55Z by kres b5ca957.
name: slack-notify
"on":
@ -32,6 +32,7 @@ name: slack-notify
- integration-aws-nvidia-oss-cron
- integration-aws-nvidia-nonfree-cron
- integration-azure-cron
- integration-gcp-cron
- integration-equinix-metal-cron
types:
- completed

View File

@ -77,6 +77,7 @@ spec:
- integration-aws-nvidia-oss
- integration-aws-nvidia-nonfree
- integration-azure
- integration-gcp
- integration-equinix-metal
---
kind: common.GHWorkflow
@ -1861,6 +1862,72 @@ spec:
ARM_CLIENT_ID: ${{ env.AZURE_CLIENT_ID }}
ARM_CLIENT_SECRET: ${{ env.AZURE_CLIENT_SECRET }}
ARM_TENANT_ID: ${{ env.AZURE_TENANT_ID }}
- name: integration-gcp
buildxOptions:
enabled: true
sops: true
depends:
- default
runners:
- self-hosted
- generic # we can use generic here since the tests run against a remote talos cluster
crons:
- '30 7 * * *'
triggerLabels:
- integration/gcp
steps:
- name: download-artifacts
conditions:
- not-on-schedule
artifactStep:
type: download
artifactName: talos-artifacts
artifactPath: _out
- name: uki-certs
conditions:
- only-on-schedule
environment:
PLATFORM: linux/amd64
- name: build
conditions:
- only-on-schedule
command: talosctl-linux-amd64 kernel sd-boot sd-stub initramfs installer imager talos _out/integration-test-linux-amd64
environment:
PLATFORM: linux/amd64
IMAGE_REGISTRY: registry.dev.siderolabs.io
PUSH: true
- name: talosctl-cni-bundle
conditions:
- only-on-schedule
- name: images-essential
conditions:
- only-on-schedule
environment:
PLATFORM: linux/amd64
IMAGE_REGISTRY: registry.dev.siderolabs.io
- name: e2e-gcp-prepare
- name: checkout contrib
checkoutStep:
repository: siderolabs/contrib
ref: main
path: _out/contrib
- name: setup tf
terraformStep: true
- name: tf apply
command: e2e-cloud-tf
environment:
TF_SCRIPT_DIR: _out/contrib
TF_E2E_TEST_TYPE: gcp
TF_E2E_ACTION: apply
- name: e2e-gcp
- name: tf destroy
command: e2e-cloud-tf
conditions:
- always
environment:
TF_SCRIPT_DIR: _out/contrib
TF_E2E_TEST_TYPE: gcp
TF_E2E_ACTION: destroy
- name: integration-equinix-metal
buildxOptions:
enabled: true

View File

@ -9,7 +9,7 @@ secrets:
EM_PROJECT_ID: ENC[AES256_GCM,data:nPVZ+Uoul/W7UpxIoeMP1n3YhuEjq3fNKD+zoso4FBP2Obd0,iv:SSF8KZBczWvCJjZpvDo60mnoM21CrzdmmKs2reLi8w0=,tag:VKjsQSHqiQY+IzkIXO70MA==,type:str]
EM_API_TOKEN: ENC[AES256_GCM,data:PnNDZTRDTubebmtAuH1sAuEp5ZwzVie5WA0AhCUk26M=,iv:5MdcOwY+QrIdkFgCXcs2rBGCXQBnhi/EDxTPWr/vCMs=,tag:mcQ9qrWPYMaPalzr/GV7pQ==,type:str]
GOOGLE_PROJECT_ID: ENC[AES256_GCM,data:egcG5hIa5aq6tSRjhA==,iv:g/6pkcSJIQNNgoon6X+6DH2JaQgLKfTDpPUNFjlJ6Xg=,tag:ygF0I8bLRRbj2RdogQqxmw==,type:str]
GOOGLE_CREDENTIALS_JSON: ENC[AES256_GCM,data:o1ZMFswuXh1q4LalVO1etrrOGShA/Uv9uUPox9X6uCvBS8kmx+3ZHKKJET7nklzJ/vMse0KHYTvHblnv8IZ5KJ+7z/XhYaHMbk1WSfn5/QYmg607yvyHqtwkeas/dZGJqE8yDJMT5JQdwLD6xvIicny2dI+WWOnJnOxFwEtwlaz3FUBndzPFaFZu3guXU3dFehe1hwipRuxPyWWYPKnuWXmN+yIVDFeiQedGGGLfuIFaG35xC0s+1ixDsow52vWAu9uU6Y9C7GuWPlC5u/xKTXTF1NR7Ji33ULQTaAZPC9NKpQ+dKsasK2wHlQYQGDMGVd+aEJnZl/7lMgp0EzygFTBne1TDg3S12N75c7E7CC89viLzYDp4DYPJ9jZz4+VZbPTdKRaVh+RYTOefKLXAjvj1N+3klDV5u9rGf/hsWtG4PkfjOCeuIZMZ1lfb1OxS2AtbRB+JgIgGImv/CSq5FnRQii5KVs1/FLl/peg69chKRLNLjJekB1CUQoZhzH9/D/upF3MYAQbvuiA/YsAWCxv6nK8bYaZtHpmTX/EpCjqpZ0d3BwMiJ7jx1aiLpqqMQzm+42x3T9OhvrA8PyZKHy7BOyTgWBREpQqUzHNJ/8Y3UYcQsjUkKCUFmJsnjvs4sdSaepbTuQRqq6WdkXeDZiwiHA+sT+BqG+pUWHqtHWtt8mxGFMlJeWnjo0hKil2Lrv9sfjN+Pemo8+SJ11dqUkcweIophQIlPsWr1rfCnNOwiUdOXuUEtKTJddQWYY/t9wDjwwVKs5hHNTcSs4AfstsOfRxDNeCMKqYOzbcryjI1rhXOULSYLtP+jI0Kh0GarcudejpqlJBje566NrqDHi7F1ZfwM6jj8NuPTUt75JxgTO33sd9PtMv9u5HtS/JPyXistdLe6ul1zWfVgUYEB1G9/QexLI//PRh/6AdCwJXCCf59/TTtCq49B4PRtn8TFRnTJT8kMFCqQiPjJaqM8zBTzVR7Iq51+wKNGMsbAmK8uiQtP8EMso5K+mXQ1aHXouzM/ZXAiahI0ee1C92uGEDsJmkVHmefzuBtpKL1y7EFBfeO2jg/FyWlLOji/kjZTMlyz3pSYfZqjddhkW6MSEVeh0p1Y8v1FK5dBRS/IJo7SWg5NCkCKmAZjePJVbOfjuVyawXRyH/x5i1f7qgGkBZIRi6ddbIdZ+zivCEb7ehXkdPL/whFLtpQPSwHzpE8mxNNcs29niq3Dx3pSIz2ZsNWhr0J3bjP319o44fJaG2QgI2BJ2zeQV1LSiLL6peQ0e2Ay8bAotn/Xd+ncqKn6DbkUjiFTz2O/ZK71xHx9WNoVEisQWvYTNR4y5IL1Z8reYfAVzrxgacqhcgJJU9dTDMYVm/I3gcUbdTca3mkr0tDxtpy3ZKNZLoU/6Le/QoeJwqlbm0sz4ryobz5EetlQyQFZ0r78mYk7fZ9ROS3vI3HKC1c07bSP6vrtTuaxUymxv3zJcflztAN+32MGe+lylNMebSe8Q6sOeAQA+Zu1udoXkOA7kxP51ZpTmHyNxEKe+7NUQ7FF5Uv4s7PkivONxZdbFuh3EM5luukxF+oV4OJX51qwQvFzQzEQnawO8KD0zrOLHRRC/dwgQYCiZhdLfwBxkvpGPjZ+MF3DF3ACRKgi7KZETACMD/hyaOeIkAe11MjBHAZ9C8RBMVhzOkHORrFz6tyfDuDK5ylXa/LktKSJHv8zsfVoK1Cz6hp+RZl7gmT7L7MWv1zr5EwI2CHb5lqk8eRTAe9kVS04TIHz0q8ziw7b9qiNvclPRsJtWtZt3H+B8Nu1sYI20lq0iBdOAtGMHlgz65EVMF3X0Cs4669+fddkNmdv/Ad2nkuK2H3LVKTGI0o0hR5t/TrPNmuQqBuwx7HiBgtavReg5iHlL00YPwE8jTM7az3s5uiSndtw3YDgMxhB2S65orUeIGuEcwOoXnPV3+Xexq
o3zRnIt6ryIWXKY3muINmhFtvZkVTp7KzuWt2Anbi9F2xhVn2n0GN6ODPQXEI8nwLxMh3ngmRaRD9EcX5+E2qfCouzfm/4eWKCjw/GggTP8NUjbIYPzFnfy2IIn677/kQDHUYwokItPWjiz811FMm/B6UAaKBDRME2HiVPSYJH10LvduVspaLXBx94Yr8e9inTSHSDM/8GnkdYtyNOfwn/ArwvwyrOXA3JkORIq2aNskGVWYQ5yRvbuK8CRpfo/WAZ52IaB4EmBy0R6YJrPBqlNoW0zw1wSijQZm1ZwtbxAvqY7K6sK85B9mgIVZa9EixyB9V6eU4Voe06opdSL8RE12D1sIiO0+NnDJl3BgFj0mg0Wxs8eiBVa2GnSi/+SNqpo0U15ExteW6wXq2rS7Rl/1LFdbaCDUKWn7joTIErgszrbL6G1KE92Nb07bj83R3Q2c+gvBDxWE80yPaJTLD5Xa9PC19WrgqKM5FGF9kxJ8q//NJ4MVDgA+qbdh4zB12zNBTK03/Q31d2DsNNngQGv+Lyv7Jx2nSQZLRw1gmOUq9ZEICm3LovBsXS4NDUKz4as0b0Mpp0iK6rdISkAXM0c2OJTPqwYhbFqFOf+PwxUv+YWeidgozq1qB9Dfc82OOfdZIbwKPBVJcEy+UGjg38KPq/GXLFy5FyeMepmzl+Wf+UxnWzqbM03n2qG9pb6Ur8wOtEN0NCvZ5znk3nAI5UC56VKw6edQ/M3B0GIfztcJ/agZTjgBgbovqcnNl7iv9lFGn9YVRocm/MiWHe590JaJdSySOxVIojSRJ9fSQvCAU/uHGSR5OvtY3WwVw8qjh06lnhpBWcKjwFA34wh5VLqkH6b8qke6yE3YD4/JhZWMriYsjXRPm4WUj3MrruwbSsh3EH0af9WV345WI5f29CeYkMwKdYk2ErZdQd5McOx4E1fznh2INfyiMjaQ8o1dxlvSZdx6/KC4O5F8r+UrJV7Gbr0m7ryeJK3NfSMPjMLwVCsPdCdtDTDnVbr/yLhfYQ/idlrLSBrolcrt9imEy+1axQzlR0LKZ6+EyZhkKKH45l0yU7U0R++j9/XkZWcbf+zmgYLqOe+tF3FgVSWsAE27Kw0h4dFjVeeqpdcg=,iv:R8UR53WDK3kFRbLwOvWXt0UhaaPgxBLkiSJaPSRvTVA=,tag:RdUgGICSm8P2Dh0tzSbFvw==,type:str]
GOOGLE_CREDENTIALS: ENC[AES256_GCM,data:ucHNQ0fygvZnOfa5k3J1zO99122malyCB1aDDwTSOuQZsUHrnCg5FYsxJ5RgTl5mbpityJjKQtHLcMetRVLQ/S2/aPjhbx+OgYLmlFMnJR/NmqEQzuwe8YsPSjOg9NCqucE7BBfbKItQ/PD0+x6LRKxeMO/gr6BXTkL4dwPinCV9IgVZU2CEAdbvcGvxBGKWZ0mHEIvdznN3dK2OL27K+Q+VXZDhzUPfhR5V7u5WUu+PG0QsSby/lZNYdAHpxfkS0YjfTCpukCB0Zvix6cUaorA+BHNYA1qV3nzHDuHHdR6byoGYXMWU209RVG2HKiNAQRlmTsbjr641OWHustGo+Ann9Rv2pZ7r6Q08p0M8cvn5o42TB5H3lv72Iiiru2aV9ZQHpzy1PG4PH9vfebowO2VbZ/wKpRWvc6cpPrAQfoXZCLlaiLoJ1KaVZb9Br8vjlGCHCFzytb61R8BT36M687fRerwVQSu6F5cRzfW6W3Jh5Ufo31wYqdEe2XdN0f/jDG1w0rF2xX4oScgXI/uaVrJ3zCYlVkdbnQjSmu059SmRRkfYJqri+hyDilWKKDPK3+qmBbyeJ+lxcNNugksRMfq5ms5HV70JoUCbiEQF5KzsTCw9s7O4xFYjP5ELXgwHqQbXeOxMS4+GAaN9k+p4owLkVRkO3PkWHBvs+aqCBQvsdZW8XjikNeHWHTZLq4DpOuBrtpvwID5bEPqN70ah1hU9gc9oXC4ArUU5QlqGxNkzdea/cymO7W+0JeRKKRW4G984b1xbLjjjL0Zx8VCDDDx7BTd+DrBsJUZ2CO+u2XIZX6+LNqPt9n/DQUARv13KM4hUmugjhZxx0f0mro1bXMfEfbs8micuk7qDQtKVkRApUlCEb+kEVDXMQUeetLQfUhU7c23QA8tMBHz2IoEmY5yf6CI5PfcUSI9Oldez4+JlCFPf3a2UyVR0+4VITYdBFW0dKLhjR/eYNOh3kEYERksALRYoS2XtLLquHSxRgAPki62RQOrZclfdi1+IGT17l+PyoSKOusIUkHd/y0jUHBSIBjYAZrqZ2WolYpwTMzE3hz3lMys9RbrJV653QgLCndWDXD7k+imXsyyYy1me/JHssYu6ZZ7EsPTGNezdG1/9j7X1VHYKw0DMsYXwIVEI+7/flwHExVTT10+gO+WIVnf7QdUpvsqJNsniCS82Rq9tRhIwLIiePOdV1azMBVqbI8LUBm/E9iFW+iYEuezLL5YsQFvItrNuH5wwhdFNiWrdBWz7klxb0R61/EBPPr6UvVf5d5WyCYonn9UUmUfQrzhgplfr3Qk8a5IAoaT1SW+iS1e15O8PsARGKy/AcX+mTf2v1qgiPnk5IucsZGlH9dwl2gIBbJ8d8SZPl4mGjW9UMesyZxoBTyY1OOYZ3NQSCZnvEggiI+BpIt35nwMLf71LZrXJOY/Q/s2eQBih4FsNb32wlEFcfpxrvT42deh1fatDTZP9NLldbyXWJkyxdAiziUiFtTM3rBly6mNG5hBsbKQpjzN94Tb/9oMD9RbCBcyj1e96DAsRNamqkPlRecZtdh9OuAuq2EjfsEFE9sodtbdxHwDdnR40SElw4jrJ0Ga0QNq04FmXMP86dz/h+MRUO3ldDvax/kjAjR5MRJZXr117sW6fuls268Qo14zdL/nbd9jYdHn9hheCAdM0FvohuCXSWgvoVcpKKCD8kKvgIaXvaFxxAKqWisrxTbtSFoflYzMjaP7L3lY5LlcadTSb+KJl5RVY3ERuUnirMrxVKYRZFsug+Smr5hlcbup/wEESv2aFp3JkEqQxfGxD+g3tHwW9U3KvJ7iv/GQFsikZ2UfLJ0EVAuiVu4Dv25CcxlWx/NGJ3e/FC9nWLgpwnzezsuithZZXci2bH4NvnDxA669Jq3LjwTcKMguEbWOuDBDlO9JcvZsa+0vlQKzZmrzirwLIi8hXQyTeKaHk
1ahcblbaZGYoI3E13u7DiGvaQ/+4Y65UFtGusR/NmXG1B4NrGb/EG3mc60JFfIi6CBhtugwEdGYGDkymgHiIM/fb6b8UHXV/kcfnTsGweH2Lc7sR5eMP5a8D7O2P4uvwp3r2NRLwrrGunOARpo835Kx/ZCheS5XCDuiy+LEaTHjDD5cDl2RlgheMjVAcXF/pc3+Q+JBccKdw1D/aN8tYTsPiJiCW6bAOVLwOm1vaa9ecQiNxnx8gym4bYERrx+hlDExS/I+xtmmdJe/s5EqGrVImZjXSkb3LON+ktATByoQVpNjsBpVfnyR7rFcz/UdLDIX3l9IS1cZhIBtkqGSylbOXMUEhhq70qcHpuNTpGxGP30K1c5A63BQB0hpM/wyV629WHUYD1SM48niVT36wVxj356secwI6JVA3hlZp5t7F/KcIuAG1WVy/GUf+cKBWNSnniRLUJdBXSXNamG5o+CN0AXvDNyP9I/xQsrbWzPC5eBNqRL0MEJsfZlWFR6fyI+5UyLlfIh0rwYTz7BK9l1wq1TRV7YqiFpALObtGVxclfKxdFymWwEfjUCwVqw8RAETGqOwfVLvvDlZp/RGup8M2ghsql1Uu/BlrCAoaiJbF8eeuIwNx6K/sq5P8RIFXRZBmWMqewg93Ea/hUIAhubz4fZtHvlh1mbOPWUkzlPCyBPd3uM93mw6wCp6xQM3DbZfQRnKD8HPr6+CFPIn1YGZ1oftBM9mEpH7PtpwJWwDA+Wdl1VbBjUjFG4XVdWxsqbS0W7pePEW1rnqv/bGuPHctEUAXN9IGKSW953T4ChFgYXGcNEURtkknE7bMlQSDZvLS/vtn89aaaL8gnfBF8NvFkB8uQSkQfn/CvQ00GdlhI1l0CyV0+xMrerUw+20nlhC7C7aNKsyx7S8HatUIEkvD6/XPRNoGa9M6w+81JYWbbDF6DFljiiNKtWmv5BkVhWFTwimEOmqKQc6DmXbvtGqH7Pts7PXS/5k83D8eV4kPHuiKNwdU0wsdo+PGSLP+OMWF3Ygbb71Bqq6dwnfciXQi/6yCUv3WdQf2MbMzyqEy9ugXeWI4uvbh+/u5HHzhwMTzhZVoGgwXuyX+nCFPCsqdzqzn8s+TSoU=,iv:nLXpwEqTJaQnZlE0CiQl+hWlBIJGSa8tEUebXpkMWec=,tag:MZ/FJeksYkqCsLaELJDq0A==,type:str]
sops:
kms: []
gcp_kms: []
@ -25,8 +25,8 @@ sops:
ZE0zRWwxVzBLL3Q1WW1FNmVvc0txZm8K+GkjAq/WSduuDrsbeyqVi29Pj2IL25mA
a11K/HVqTCU834uHQXjpN3keJS23v5BJGZCpOwVXyZX8f1yAm/ZQAA==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-09-04T04:28:03Z"
mac: ENC[AES256_GCM,data:PHRJmUueHiv84Pt8fHAze4LWl0RiuNuKozWC/G1ixhqL055Zfe3X5Iv//0qFXDMWBy9b09IiXbd4WwsexaoLhgRE1ZBul1AObK8gsbBHf1DyjxgCek/AJNlIS3WY6NPZ9L5lwxuD0BhqbgzGw6It3rcIyx0q++Zo2UoDQyMciWA=,iv:0R2VsKSFfIzapJsqQELM7LMXvEhDmDWr1f9Amg5i4hs=,tag:ig8uLTnupsmPlgV8GEZTgA==,type:str]
lastmodified: "2024-09-05T16:58:15Z"
mac: ENC[AES256_GCM,data:/7bib7AGGFlP2l93l0Ht6r4Guxaq0sVtsDEbmyEYHYZh1NKwIy/RO0W3SwGXcQPNI9vgPjdP3lsXsUiJ0B0ADxsjWrTjZOkIHxhsBr6Yr59uTGiGOMFEYKPSK5+5ZG9dR4yu0CHyZlxHq+P1L/grsmb0ucazfWguM9y58hqpG24=,iv:ZgWz2o47PgN+3uXDzTID/IgSe/Y5fd+Ubz7MlHKgmRg=,tag:CUbJx/WHlSr5/Jpyf7ODcg==,type:str]
pgp:
- created_at: "2024-04-29T17:03:17Z"
enc: |-

View File

@ -102,8 +102,6 @@ KUBECTL_VERSION ?= v1.31.0
KUBESTR_VERSION ?= v0.4.46
# renovate: datasource=github-releases depName=helm/helm
HELM_VERSION ?= v3.15.4
# renovate: datasource=github-releases depName=kubernetes-sigs/cluster-api
CLUSTERCTL_VERSION ?= 1.8.1
# renovate: datasource=github-releases depName=cilium/cilium-cli
CILIUM_CLI_VERSION ?= v0.16.16
# renovate: datasource=github-releases depName=microsoft/secureboot_objects
@ -112,7 +110,6 @@ MICROSOFT_SECUREBOOT_RELEASE ?= v1.1.3
KUBECTL_URL ?= https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/$(OPERATING_SYSTEM)/amd64/kubectl
KUBESTR_URL ?= https://github.com/kastenhq/kubestr/releases/download/$(KUBESTR_VERSION)/kubestr_$(subst v,,$(KUBESTR_VERSION))_Linux_amd64.tar.gz
HELM_URL ?= https://get.helm.sh/helm-$(HELM_VERSION)-linux-amd64.tar.gz
CLUSTERCTL_URL ?= https://github.com/kubernetes-sigs/cluster-api/releases/download/v$(CLUSTERCTL_VERSION)/clusterctl-$(OPERATING_SYSTEM)-amd64
CILIUM_CLI_URL ?= https://github.com/cilium/cilium-cli/releases/download/$(CILIUM_CLI_VERSION)/cilium-$(OPERATING_SYSTEM)-amd64.tar.gz
TESTPKGS ?= github.com/siderolabs/talos/...
RELEASES ?= v1.6.7 v1.7.0
@ -426,6 +423,7 @@ cloud-images: ## Uploads cloud images (AMIs, etc.) to the cloud registry.
-e TAG=$(TAG) -e ARTIFACTS=$(ARTIFACTS) -e ABBREV_TAG=$(ABBREV_TAG) \
-e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY \
-e AZURE_SUBSCRIPTION_ID -e AZURE_CLIENT_ID -e AZURE_CLIENT_SECRET -e AZURE_TENANT_ID \
-e GOOGLE_PROJECT_ID -e GOOGLE_CREDENTIALS \
golang:$(GO_VERSION) \
./hack/cloud-image-uploader.sh $(CLOUD_IMAGES_EXTRA_ARGS)
@ -492,17 +490,12 @@ $(ARTIFACTS)/helm:
@curl -L "$(HELM_URL)" | tar xzf - -C $(ARTIFACTS) --strip-components=1 linux-amd64/helm
@chmod +x $(ARTIFACTS)/helm
$(ARTIFACTS)/clusterctl:
@mkdir -p $(ARTIFACTS)
@curl -L -o $(ARTIFACTS)/clusterctl "$(CLUSTERCTL_URL)"
@chmod +x $(ARTIFACTS)/clusterctl
$(ARTIFACTS)/cilium:
@mkdir -p $(ARTIFACTS)
@curl -L "$(CILIUM_CLI_URL)" | tar xzf - -C $(ARTIFACTS) cilium
@chmod +x $(ARTIFACTS)/cilium
external-artifacts: $(ARTIFACTS)/kubectl $(ARTIFACTS)/clusterctl $(ARTIFACTS)/kubestr $(ARTIFACTS)/helm $(ARTIFACTS)/cilium
external-artifacts: $(ARTIFACTS)/kubectl $(ARTIFACTS)/kubestr $(ARTIFACTS)/helm $(ARTIFACTS)/cilium
e2e-%: $(ARTIFACTS)/$(INTEGRATION_TEST_DEFAULT_TARGET)-amd64 external-artifacts ## Runs the E2E test for the specified platform (e.g. e2e-docker).
@$(MAKE) hack-test-$@ \
@ -520,7 +513,6 @@ e2e-%: $(ARTIFACTS)/$(INTEGRATION_TEST_DEFAULT_TARGET)-amd64 external-artifacts
KUBECTL=$(PWD)/$(ARTIFACTS)/kubectl \
KUBESTR=$(PWD)/$(ARTIFACTS)/kubestr \
HELM=$(PWD)/$(ARTIFACTS)/helm \
CLUSTERCTL=$(PWD)/$(ARTIFACTS)/clusterctl \
CILIUM_CLI=$(PWD)/$(ARTIFACTS)/cilium
provision-tests-prepare: release-artifacts $(ARTIFACTS)/$(INTEGRATION_TEST_PROVISION_DEFAULT_TARGET)-amd64

View File

@ -40,14 +40,14 @@ type GCPUploder struct {
// NewGCPUploder creates a new GCPUploder.
func NewGCPUploder(options Options) (*GCPUploder, error) {
projectID := os.Getenv("GOOGLE_PROJECT_ID")
credentials := os.Getenv("GOOGLE_CREDENTIALS_JSON")
credentials := os.Getenv("GOOGLE_CREDENTIALS")
if projectID == "" {
return nil, fmt.Errorf("gcp: GOOGLE_PROJECT_ID is not set")
}
if credentials == "" {
return nil, fmt.Errorf("gcp: GOOGLE_CREDENTIALS_JSON is not set")
return nil, fmt.Errorf("gcp: GOOGLE_CREDENTIALS is not set")
}
gcpUploader := &GCPUploder{

View File

@ -1,137 +0,0 @@
apiVersion: cluster.k8s.io/v1alpha1
kind: Cluster
metadata:
annotations: null
name: talos-e2e-{{TAG}}-azure
spec:
clusterNetwork:
pods:
cidrBlocks:
- 192.168.0.0/16
serviceDomain: cluster.local
services:
cidrBlocks:
- 10.96.0.0/12
providerSpec:
value:
apiVersion: talosproviderconfig/v1alpha1
kind: TalosClusterProviderSpec
platform:
config: |-
resourcegroup: "talos"
location: "centralus"
type: azure
controlplane:
count: 3
k8sversion: '1.16.2'
---
apiVersion: cluster.k8s.io/v1alpha1
kind: Machine
metadata:
labels:
cluster.k8s.io/cluster-name: talos-e2e-{{TAG}}-azure
set: master
name: talos-e2e-{{TAG}}-azure-master-0
spec:
providerSpec:
value:
apiVersion: talosproviderconfig/v1alpha1
kind: TalosMachineProviderSpec
platform:
config: |-
location: "centralus"
resourcegroup: "talos"
instances:
type: "Standard_A2_v2"
image: "/subscriptions/64739c64-c063-4c9d-bf2c-d1191ed8befa/resourceGroups/talos/providers/Microsoft.Compute/images/talos-e2e-{{TAG}}"
network: "talos-vnet"
subnet: "default"
disks:
size: 10
type: azure
---
apiVersion: cluster.k8s.io/v1alpha1
kind: Machine
metadata:
labels:
cluster.k8s.io/cluster-name: talos-e2e-{{TAG}}-azure
set: master
name: talos-e2e-{{TAG}}-azure-master-1
spec:
providerSpec:
value:
apiVersion: talosproviderconfig/v1alpha1
kind: TalosMachineProviderSpec
platform:
config: |-
location: "centralus"
resourcegroup: "talos"
instances:
type: "Standard_A2_v2"
image: "/subscriptions/64739c64-c063-4c9d-bf2c-d1191ed8befa/resourceGroups/talos/providers/Microsoft.Compute/images/talos-e2e-{{TAG}}"
network: "talos-vnet"
subnet: "default"
disks:
size: 10
type: azure
---
apiVersion: cluster.k8s.io/v1alpha1
kind: Machine
metadata:
labels:
cluster.k8s.io/cluster-name: talos-e2e-{{TAG}}-azure
set: master
name: talos-e2e-{{TAG}}-azure-master-2
spec:
providerSpec:
value:
apiVersion: talosproviderconfig/v1alpha1
kind: TalosMachineProviderSpec
platform:
config: |-
location: "centralus"
resourcegroup: "talos"
instances:
type: "Standard_A2_v2"
image: "/subscriptions/64739c64-c063-4c9d-bf2c-d1191ed8befa/resourceGroups/talos/providers/Microsoft.Compute/images/talos-e2e-{{TAG}}"
network: "talos-vnet"
subnet: "default"
disks:
size: 10
type: azure
---
apiVersion: cluster.k8s.io/v1alpha1
kind: MachineDeployment
metadata:
labels:
cluster.k8s.io/cluster-name: talos-e2e-{{TAG}}-azure
set: worker
name: talos-e2e-{{TAG}}-azure-workers
spec:
replicas: 3
selector:
matchLabels:
cluster.k8s.io/cluster-name: talos-e2e-{{TAG}}-azure
set: worker
template:
metadata:
labels:
cluster.k8s.io/cluster-name: talos-e2e-{{TAG}}-azure
set: worker
spec:
providerSpec:
value:
apiVersion: talosproviderconfig/v1alpha1
kind: TalosMachineProviderSpec
platform:
config: |-
location: "centralus"
resourcegroup: "talos"
instances:
type: "Standard_A2_v2"
image: "/subscriptions/64739c64-c063-4c9d-bf2c-d1191ed8befa/resourceGroups/talos/providers/Microsoft.Compute/images/talos-e2e-{{TAG}}"
network: "talos-vnet"
subnet: "default"
disks:
size: 10
type: azure

View File

@ -1,24 +0,0 @@
## These are just dumb, empty secrets to meet the requirements of our cluster-api provider
apiVersion: v1
kind: Secret
metadata:
name: aws-credentials
namespace: cluster-api-provider-talos-system
data:
credentials: "{{AWS_SVC_ACCT}}"
---
apiVersion: v1
kind: Secret
metadata:
name: gce-credentials
namespace: cluster-api-provider-talos-system
data:
service-account.json: "{{GCE_SVC_ACCT}}"
---
apiVersion: v1
kind: Secret
metadata:
name: azure-credentials
namespace: cluster-api-provider-talos-system
data:
service-account.json: "{{AZURE_SVC_ACCT}}"

View File

@ -1,7 +0,0 @@
providers:
- name: "talos"
url: "https://github.com/siderolabs/cluster-api-bootstrap-provider-talos/releases/latest/bootstrap-components.yaml"
type: "BootstrapProvider"
- name: "talos"
url: "https://github.com/siderolabs/cluster-api-control-plane-provider-talos/releases/latest/control-plane-components.yaml"
type: "ControlPlaneProvider"

View File

@ -1,52 +0,0 @@
Validated: true
Available: true
Errors: []
ReadOnly: false
Meta:
color: 'yellow'
feature-flags: 'change-stage-v2'
icon: 'linux'
title: 'Talos'
Endpoint: ''
Bundle: ''
Name: 'talos'
Description: 'talos'
Documentation: ''
OS:
Name: 'talos'
Family: 'talos'
Codename: ''
Version: '0.6'
IsoFile: 'talos.tar.gz'
IsoSha256: '401ec26cef179e0a66d44f020dad47f7322f4cfc84826242e9c83d6cce4f2547'
IsoUrl: 'https://github.com/siderolabs/talos/releases/download/v0.6.0/boot.tar.gz'
SupportedArchitectures: {}
Templates:
- Name: 'pxelinux'
Path: 'pxelinux.cfg/{{.Machine.HexAddress}}'
ID: 'default-pxelinux.tmpl'
Contents: ''
Meta: {}
- Name: 'ipxe'
Path: '{{.Machine.Address}}.ipxe'
ID: 'default-ipxe.tmpl'
Contents: ''
Meta: {}
- Name: 'pxelinux-mac'
Path: 'pxelinux.cfg/{{.Machine.MacAddr "pxelinux"}}'
ID: 'default-pxelinux.tmpl'
Contents: ''
Meta: {}
- Name: 'ipxe-mac'
Path: '{{.Machine.MacAddr "ipxe"}}.ipxe'
ID: 'default-ipxe.tmpl'
Contents: ''
Meta: {}
Kernel: 'vmlinuz'
Initrds:
- 'initramfs.xz'
BootParams: 'console=tty0 ip=dhcp modules=loop,squashfs nomodeset init_on_alloc=1 slab_nomerge pti=on ima_template=ima-ng ima_appraise=fix ima_hash=sha512 talos.platform=metal talos.config={{ .ProvisionerURL }}/files/{{.Param "talos/role"}}.yaml'
RequiredParams: []
OptionalParams:
- 'talos/role'
OnlyUnknown: false

View File

@ -1,16 +0,0 @@
Validated: true
Available: true
Errors: []
ReadOnly: false
Meta:
color: "black"
icon: "tags"
title: "Talos role"
Endpoint: ""
Bundle: ""
Name: "talos/role"
Description: "Role of node"
Documentation: ""
Secure: false
Schema:
type: "string"

View File

@ -1,23 +0,0 @@
Validated: true
Available: true
Errors: []
ReadOnly: false
Meta:
color: 'yellow'
icon: 'spinner'
title: 'talos'
Endpoint: ''
Bundle: ''
Name: 'Talos'
Description: 'Talos'
Documentation: ''
Templates: []
RequiredParams:
- 'talos/role'
OptionalParams: []
Params: {}
BootEnv: 'talos'
Tasks: []
Profiles: []
Reboot: false
RunnerWait: true

View File

@ -1,15 +0,0 @@
Validated: true
Available: true
Errors: []
ReadOnly: false
Meta:
color: "black"
icon: "tags"
title: "Talos-controlplane"
Endpoint: ""
Bundle: ""
Name: "talos-controlplane"
Description: "Talos controlplane"
Documentation: ""
Params:
talos/role: "controlplane"

View File

@ -1,15 +0,0 @@
Validated: true
Available: true
Errors: []
ReadOnly: false
Meta:
color: "black"
icon: "tags"
title: "Talos-init"
Endpoint: ""
Bundle: ""
Name: "talos-init"
Description: "Talos init"
Documentation: ""
Params:
talos/role: "init"

View File

@ -1,15 +0,0 @@
Validated: true
Available: true
Errors: []
ReadOnly: false
Meta:
color: "black"
icon: "tags"
title: "Talos-worker"
Endpoint: ""
Bundle: ""
Name: "talos-worker"
Description: "Talos worker"
Documentation: ""
Params:
talos/role: "worker"

View File

@ -1,15 +0,0 @@
Validated: true
Available: true
Errors: []
ReadOnly: false
Meta:
color: "Yellow"
icon: "linux"
title: "Talos"
Endpoint: ""
Bundle: ""
Name: "Talos"
Description: "Talos"
Documentation: ""
Stages:
- "Talos"

View File

@ -1,39 +0,0 @@
#!/usr/bin/env bash
# Bootstraps Cluster API management components onto the local docker e2e
# cluster: core CAPI plus the Talos bootstrap/control-plane providers and
# the AWS + GCP infrastructure providers.
set -eou pipefail
source ./hack/test/e2e.sh
# Pinned provider versions (overridable via the environment).
export CAPI_VERSION="${CAPI_VERSION:-1.4.0}"
export CAPA_VERSION="${CAPA_VERSION:-1.5.2}"
export CAPG_VERSION="${CAPG_VERSION:-1.3.0}"
# We need to override this here since e2e.sh will set it to ${TMP}/capi/kubeconfig.
export KUBECONFIG="/tmp/e2e/docker/kubeconfig"
# CABPT
# Namespace of the Cluster API Bootstrap Provider Talos pods waited on below.
export CABPT_NS="cabpt-system"
# Install envsubst
apk add --no-cache gettext
# Env vars for cloud accounts
# Tracing is disabled around the exports so the credentials never hit the
# build log; values are presumably base64-encoded (per the variable names) —
# TODO confirm against the secrets source.
set +x
export GCP_B64ENCODED_CREDENTIALS=${GCE_SVC_ACCT}
export AWS_B64ENCODED_CREDENTIALS=${AWS_SVC_ACCT}
set -x
# Install all providers in one shot via clusterctl.
${CLUSTERCTL} init \
--config hack/test/clusterctl.yaml \
--core "cluster-api:v${CAPI_VERSION}" \
--control-plane "talos" \
--infrastructure "aws:v${CAPA_VERSION},gcp:v${CAPG_VERSION}" \
--bootstrap "talos"
# Wait for the talosconfig
# Poll until every pod in the CABPT namespace is Ready, giving up once the
# deadline (now + TIMEOUT seconds) passes. TIMEOUT/KUBECTL/CLUSTERCTL are
# presumably provided by e2e.sh — confirm there.
timeout=$(($(date +%s) + ${TIMEOUT}))
until ${KUBECTL} wait --timeout=1s --for=condition=Ready -n ${CABPT_NS} pods --all; do
[[ $(date +%s) -gt $timeout ]] && exit 1
echo 'Waiting to CABPT pod to be available...'
sleep 5
done

View File

@ -35,6 +35,3 @@ get_kubeconfig
${KUBECTL} config set-cluster e2e-docker --server https://10.5.0.2:6443
run_talos_integration_test_docker
run_kubernetes_integration_test
# Unlike other local e2e tests, we don't destroy the cluster there as it is used by CAPI and AWS/GCP e2e tests later.
# destroy_cluster

48
hack/test/e2e-gcp-prepare.sh Executable file
View File

@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Prepares inputs for the GCP e2e run: builds/uploads the amd64 GCP cloud
# image, then renders the Terraform variables file from its manifest.
set -eou pipefail
source ./hack/test/e2e.sh
# GCP placement for the test cluster.
REGION="us-central1"
ZONE="us-central1-a"
PROJECT_ID="${GOOGLE_PROJECT_ID}"
# Build (and upload) the GCP amd64 cloud image via the repo Makefile.
function cloud_image_upload() {
CLOUD_IMAGES_EXTRA_ARGS=("--target-clouds=gcp" "--architectures=amd64")
make cloud-images CLOUD_IMAGES_EXTRA_ARGS="${CLOUD_IMAGES_EXTRA_ARGS[*]}"
}
# Extract the GCP amd64 image id from the cloud-images manifest written
# under ${ARTIFACTS}.
function get_image() {
jq -r ".[] | select(.cloud == \"gcp\") | select (.arch == \"amd64\") | .id" "${ARTIFACTS}/cloud-images.json"
}
cloud_image_upload
GCP_IMAGE=$(get_image)
mkdir -p "${ARTIFACTS}/e2e-gcp-generated"
# SHA/ARTIFACTS/TALOS_VERSION/KUBERNETES_VERSION presumably come from
# e2e.sh or the CI environment — TODO confirm. SHA keeps concurrent runs
# from colliding on resource names.
NAME_PREFIX="talos-e2e-${SHA}-gcp"
# Assemble the variables as JSON, then map them into the Terraform tfvars
# shape via hack/test/tfvars/gcp.jq.
jq --null-input \
--arg REGION "${REGION}" \
--arg ZONE "${ZONE}" \
--arg PROJECT_ID "${PROJECT_ID}" \
--arg GCP_IMAGE "${GCP_IMAGE}" \
--arg CLUSTER_NAME "${NAME_PREFIX}" \
--arg TALOS_VERSION_CONTRACT "${TALOS_VERSION}" \
--arg KUBERNETES_VERSION "${KUBERNETES_VERSION}" \
'{
region: $REGION,
zone: $ZONE,
project_id: $PROJECT_ID,
gcp_image: $GCP_IMAGE,
cluster_name: $CLUSTER_NAME,
talos_version_contract: $TALOS_VERSION_CONTRACT,
kubernetes_version: $KUBERNETES_VERSION
}' \
| jq -f hack/test/tfvars/gcp.jq > "${ARTIFACTS}/e2e-gcp-generated/vars.json"
cp hack/test/tfvars/*.yaml "${ARTIFACTS}/e2e-gcp-generated"

View File

@ -4,44 +4,12 @@ set -eou pipefail
source ./hack/test/e2e.sh
function setup {
set +x
echo ${GCE_SVC_ACCT} | base64 -d > ${TMP}/svc-acct.json
gcloud auth activate-service-account --key-file ${TMP}/svc-acct.json
set -x
cp "${ARTIFACTS}/e2e-gcp-talosconfig" "${TALOSCONFIG}"
cp "${ARTIFACTS}/e2e-gcp-kubeconfig" "${KUBECONFIG}"
## Cluster-wide vars
export CLUSTER_NAME=${NAME_PREFIX}
export GCP_PROJECT=siderolabs-dev
export GCP_REGION=us-central1
export GCP_NETWORK=default
export GCP_VM_SVC_ACCOUNT=e2e-tester@${GCP_PROJECT}.iam.gserviceaccount.com
# set the talosconfig to use the first controlplane ip
CONTROLPLANE0_NODE=$(${TALOSCTL} config info -o json | jq -r '.endpoints[0]')
${TALOSCTL} config node "${CONTROLPLANE0_NODE}"
## Control plane vars
export CONTROL_PLANE_MACHINE_COUNT=3
export GCP_CONTROL_PLANE_MACHINE_TYPE=n1-standard-4
export GCP_CONTROL_PLANE_VOL_SIZE=50
export GCP_CONTROL_PLANE_IMAGE_ID=projects/${GCP_PROJECT}/global/images/talos-e2e-${SHA}
## Worker vars
export WORKER_MACHINE_COUNT=3
export GCP_NODE_MACHINE_TYPE=n1-standard-4
export GCP_NODE_VOL_SIZE=50
export GCP_NODE_IMAGE_ID=projects/${GCP_PROJECT}/global/images/talos-e2e-${SHA}
## Create GCP Image
gsutil cp ${ARTIFACTS}/gcp-amd64.raw.tar.gz gs://siderolabs-e2e/gcp-${SHA}.tar.gz
gcloud --quiet --project ${GCP_PROJECT} compute images delete talos-e2e-${SHA} || true
gcloud --quiet --project ${GCP_PROJECT} compute images create talos-e2e-${SHA} --source-uri gs://siderolabs-e2e/gcp-${SHA}.tar.gz
${CLUSTERCTL} generate cluster ${NAME_PREFIX} \
--kubeconfig /tmp/e2e/docker/kubeconfig \
--from https://github.com/siderolabs/cluster-api-templates/blob/v1beta1/gcp/standard/standard.yaml > ${TMP}/cluster.yaml
}
setup
create_cluster_capi gcp
run_talos_integration_test
run_kubernetes_integration_test

View File

@ -17,14 +17,8 @@
# - KUBECTL
# - KUBESTR
# - HELM
# - CLUSTERCTL
# - CILIUM_CLI
#
# Some environment variables set in this file (e. g. TALOS_VERSION and KUBERNETES_VERSION)
# are referenced by https://github.com/siderolabs/cluster-api-templates.
# See other e2e-*.sh scripts.
set -eoux pipefail
TMP="/tmp/e2e/${PLATFORM}"
@ -49,86 +43,6 @@ export NUM_NODES=${TEST_NUM_NODES:-6}
PROVISIONER=
CLUSTER_NAME=
cleanup_capi() {
${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig delete cluster "${NAME_PREFIX}"
}
# Create a cluster via CAPI.
function create_cluster_capi {
trap cleanup_capi EXIT
${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig apply -f "${TMP}/cluster.yaml"
# Wait for first controlplane machine to have a name
timeout=$(($(date +%s) + TIMEOUT))
until [ -n "$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine -l cluster.x-k8s.io/control-plane,cluster.x-k8s.io/cluster-name="${NAME_PREFIX}" --all-namespaces -o json | jq -re '.items[0].metadata.name | select (.!=null)')" ]; do
[[ $(date +%s) -gt $timeout ]] && exit 1
sleep 10
${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine -l cluster.x-k8s.io/control-plane,cluster.x-k8s.io/cluster-name="${NAME_PREFIX}" --all-namespaces
done
FIRST_CP_NODE=$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine -l cluster.x-k8s.io/control-plane,cluster.x-k8s.io/cluster-name="${NAME_PREFIX}" --all-namespaces -o json | jq -r '.items[0].metadata.name')
# Wait for first controlplane machine to have a talosconfig ref
timeout=$(($(date +%s) + TIMEOUT))
until [ -n "$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine "${FIRST_CP_NODE}" -o json | jq -re '.spec.bootstrap.configRef.name | select (.!=null)')" ]; do
[[ $(date +%s) -gt $timeout ]] && exit 1
sleep 10
done
FIRST_CP_TALOSCONFIG=$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine "${FIRST_CP_NODE}" -o json | jq -re '.spec.bootstrap.configRef.name')
# Wait for talosconfig in cm then dump it out
timeout=$(($(date +%s) + TIMEOUT))
until [ -n "$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get talosconfig "${FIRST_CP_TALOSCONFIG}" -o jsonpath='{.status.talosConfig}')" ]; do
[[ $(date +%s) -gt $timeout ]] && exit 1
sleep 10
done
${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get talosconfig "${FIRST_CP_TALOSCONFIG}" -o jsonpath='{.status.talosConfig}' > "${TALOSCONFIG}"
# Wait until we have an IP for first controlplane node
timeout=$(($(date +%s) + TIMEOUT))
until [ -n "$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine -o go-template --template='{{range .status.addresses}}{{if eq .type "ExternalIP"}}{{.address}}{{end}}{{end}}' "${FIRST_CP_NODE}")" ]; do
[[ $(date +%s) -gt $timeout ]] && exit 1
sleep 10
done
MASTER_IP=$(${KUBECTL} --kubeconfig /tmp/e2e/docker/kubeconfig get machine -o go-template --template='{{range .status.addresses}}{{if eq .type "ExternalIP"}}{{.address}}{{end}}{{end}}' "${FIRST_CP_NODE}")
"${TALOSCTL}" config endpoint "${MASTER_IP}"
"${TALOSCTL}" config node "${MASTER_IP}"
# Wait for the kubeconfig from first cp node
timeout=$(($(date +%s) + TIMEOUT))
until get_kubeconfig; do
[[ $(date +%s) -gt $timeout ]] && exit 1
sleep 10
done
# Wait for nodes to check in
timeout=$(($(date +%s) + TIMEOUT))
until ${KUBECTL} get nodes -o go-template='{{ len .items }}' | grep "${NUM_NODES}" >/dev/null; do
[[ $(date +%s) -gt $timeout ]] && exit 1
${KUBECTL} get nodes -o wide && :
sleep 10
done
# Wait for nodes to be ready
timeout=$(($(date +%s) + TIMEOUT))
until ${KUBECTL} wait --timeout=1s --for=condition=ready=true --all nodes > /dev/null; do
[[ $(date +%s) -gt $timeout ]] && exit 1
${KUBECTL} get nodes -o wide && :
sleep 10
done
# Verify that we have an HA controlplane
timeout=$(($(date +%s) + TIMEOUT))
until ${KUBECTL} get nodes -l node-role.kubernetes.io/control-plane='' -o go-template='{{ len .items }}' | grep 3 > /dev/null; do
[[ $(date +%s) -gt $timeout ]] && exit 1
${KUBECTL} get nodes -l node-role.kubernetes.io/control-plane='' && :
sleep 10
done
}
TEST_SHORT=()
TEST_RUN=("-test.run" ".")

View File

@ -1,15 +0,0 @@
# Registers the gVisor (runsc) runtime class and a sample nginx pod that
# exercises it.
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: gvisor
handler: runsc
---
apiVersion: v1
kind: Pod
metadata:
  name: nginx-gvisor
spec:
  runtimeClassName: gvisor
  containers:
    - name: nginx
      image: nginx

View File

@ -1,93 +0,0 @@
# Integration Testing
## Setup
### Prerequisites
- A linux machine with KVM enabled
- `docker`
- `docker-compose`
- `virt-install`
- `qemu-kvm`
- `yq`
```bash
apt install -y virtinst qemu-kvm
curl -L https://github.com/mikefarah/yq/releases/download/2.4.1/yq_linux_amd64 -o /usr/local/bin/yq
chmod +x /usr/local/bin/yq
```
### Start Matchbox, Dnsmasq, and HAProxy
```bash
docker-compose up
```
> Note: This will run all services in the foreground.
### Create the VMs
```bash
./libvirt.sh up
```
### Getting the Console Logs
```bash
virsh console <VM>
```
### Connecting to the Nodes
#### From the Host
##### Setup DNS
Append the following to `/etc/hosts`:
```text
172.28.1.3 kubernetes.talos.dev
172.28.1.10 control-plane-1.talos.dev
172.28.1.11 control-plane-2.talos.dev
172.28.1.12 control-plane-3.talos.dev
172.28.1.13 worker-1.talos.dev
```
##### Setup `talosctl` and `kubectl`
```bash
export TALOSCONFIG=$PWD/matchbox/assets/talosconfig
export KUBECONFIG=$PWD/matchbox/assets/kubeconfig
```
```bash
talosctl config endpoint 172.28.1.10
talosctl kubeconfig ./matchbox/assets/kubeconfig
```
#### From a Container
```bash
./libvirt.sh workspace
```
```bash
talosctl config endpoint 172.28.1.10
talosctl kubeconfig .
```
#### Verify Connectivity
```bash
talosctl services
kubectl get nodes
```
## Teardown
To tear down the test:
```bash
docker-compose down
./libvirt.sh down
```

View File

@ -1,41 +0,0 @@
# dnsmasq.conf
# DHCP/DNS/TFTP service for the Talos matchbox integration-test network.
no-daemon
domain=talos.dev
# Dynamic pool for unknown clients; option 3 pushes the default gateway.
dhcp-range=172.28.1.50,172.28.1.99
dhcp-option=3,172.28.0.1
# Static leases — MACs must match the libvirt.sh VM definitions.
dhcp-host=52:54:00:a1:9c:ae,172.28.1.10,control-plane-1,1h
dhcp-host=52:54:00:b2:2f:86,172.28.1.11,control-plane-2,1h
dhcp-host=52:54:00:c3:61:77,172.28.1.12,control-plane-3,1h
dhcp-host=52:54:00:d7:99:c7,172.28.1.13,worker-1,1h
# Serve boot files over TFTP from the mounted tftpboot directory.
enable-tftp
tftp-root=/var/lib/tftpboot
# Legacy PXE
dhcp-match=set:bios,option:client-arch,0
dhcp-boot=tag:bios,undionly.kpxe
# UEFI
dhcp-match=set:efi32,option:client-arch,6
dhcp-boot=tag:efi32,ipxe.efi
dhcp-match=set:efibc,option:client-arch,7
dhcp-boot=tag:efibc,ipxe.efi
dhcp-match=set:efi64,option:client-arch,9
dhcp-boot=tag:efi64,ipxe.efi
# iPXE
# Clients that already run iPXE chain straight to matchbox.
dhcp-userclass=set:ipxe,iPXE
dhcp-boot=tag:ipxe,http://matchbox.talos.dev:8080/boot.ipxe
log-queries
log-dhcp
# Local DNS records: matchbox, the HAProxy K8s endpoint, and each node.
address=/matchbox.talos.dev/172.28.1.2
address=/kubernetes.talos.dev/172.28.1.3
address=/control-plane-1.talos.dev/172.28.1.10
address=/control-plane-2.talos.dev/172.28.1.11
address=/control-plane-3.talos.dev/172.28.1.12
address=/worker-1.talos.dev/172.28.1.13

View File

@ -1,52 +0,0 @@
# Integration-test services: PXE DHCP/TFTP (dnsmasq), the matchbox boot
# config server, and HAProxy fronting the Kubernetes API, all on the
# dedicated "talos" bridge network (talos0).
version: '3.7'

services:
  dnsmasq:
    container_name: dnsmasq
    image: quay.io/poseidon/dnsmasq:latest
    command: ['-d']
    networks:
      talos:
        ipv4_address: 172.28.1.1
    volumes:
      - ./dnsmasq/talos0.conf:/etc/dnsmasq.conf:Z
      - ./dnsmasq/tftpboot:/var/lib/tftpboot
    cap_add:
      # Required for DHCP/raw-socket operation.
      - NET_ADMIN
    restart: always

  matchbox:
    container_name: matchbox
    image: quay.io/poseidon/matchbox:latest
    command: ['-address=0.0.0.0:8080', '-log-level=debug']
    networks:
      talos:
        ipv4_address: 172.28.1.2
    ports:
      - '8080:8080'
    volumes:
      - ./matchbox:/var/lib/matchbox:Z
    restart: always

  haproxy:
    container_name: haproxy
    image: haproxy:1.9.12-alpine
    networks:
      talos:
        ipv4_address: 172.28.1.3
    ports:
      - '6443:6443'
    volumes:
      - ./haproxy:/usr/local/etc/haproxy:ro
    restart: always

networks:
  talos:
    name: talos
    driver: bridge
    driver_opts:
      com.docker.network.bridge.name: talos0
    ipam:
      driver: default
      config:
        - subnet: 172.28.0.0/16

View File

@ -1,20 +0,0 @@
# TCP passthrough frontend for the Kubernetes API (port 6443).
frontend k8s-api
bind 0.0.0.0:6443
bind 127.0.0.1:6443
mode tcp
option tcplog
timeout client 300000
default_backend k8s-api
# Round-robin the API traffic across the three control-plane nodes with
# TCP health checks; IPs match the dnsmasq static leases.
backend k8s-api
mode tcp
option tcplog
option tcp-check
timeout connect 300000
timeout server 300000
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
server apiserver1 172.28.1.10:6443 check
server apiserver2 172.28.1.11:6443 check
server apiserver3 172.28.1.12:6443 check

View File

@ -1,87 +0,0 @@
#!/usr/bin/env bash
# libvirt.sh — create/destroy the QEMU/KVM integration-test nodes on the
# talos0 bridge, or enter a workspace container with talosctl/kubectl set up.
set -e

# Defining/destroying VMs via virsh requires root.
if [ "$EUID" -ne 0 ]
  then echo "Please run as root"
  exit
fi

# Dispatch on the first CLI argument; anything unrecognized prints usage.
function main {
  case "$1" in
    "up") up;;
    "down") down;;
    "workspace") workspace;;
    *)
      usage
      exit 2
      ;;
  esac
}

# Print usage help.
# FIX: the original echoed malformed escapes ("\up", "\spin", "\t\tear",
# "\run" — "\r" is a carriage return under `echo -e`), garbling the text.
function usage {
  echo "USAGE: ${0##*/} <command>"
  echo "Commands:"
  echo -e "  up\t\tspin up QEMU/KVM nodes on the talos0 bridge"
  echo -e "  down\t\ttear down the QEMU/KVM nodes"
  echo -e "  workspace\trun and enter a docker container ready for talosctl and kubectl use"
}

# Node inventory and tunables (overridable via the environment).
NODES=(control-plane-1 control-plane-2 control-plane-3 worker-1)
INSTALLER=${INSTALLER:-ghcr.io/siderolabs/installer:latest}
VM_MEMORY=${VM_MEMORY:-2048}
VM_DISK=${VM_DISK:-10}
CNI_URL=${CNI_URL:-https://raw.githubusercontent.com/cilium/cilium/1.6.4/install/kubernetes/quick-install.yaml}
# Intentionally left unquoted at the call sites so each option word-splits.
COMMON_VIRT_OPTS="--memory=${VM_MEMORY} --cpu=host --vcpus=1 --disk pool=default,size=${VM_DISK} --os-type=linux --os-variant=generic --noautoconsole --graphics none --events on_poweroff=preserve --rng /dev/urandom"

# Fixed MACs must match the dnsmasq static leases.
CONTROL_PLANE_1_NAME=control-plane-1
CONTROL_PLANE_1_MAC=52:54:00:a1:9c:ae
CONTROL_PLANE_2_NAME=control-plane-2
CONTROL_PLANE_2_MAC=52:54:00:b2:2f:86
CONTROL_PLANE_3_NAME=control-plane-3
CONTROL_PLANE_3_MAC=52:54:00:c3:61:77
WORKER_1_NAME=worker-1
WORKER_1_MAC=52:54:00:d7:99:c7

# Stage boot assets for matchbox, generate Talos machine configs, then
# define one VM per node, PXE-booting on the talos0 bridge.
function up {
  echo ${INSTALLER}
  cp $PWD/../../../${ARTIFACTS}/initramfs.xz ./matchbox/assets/
  cp $PWD/../../../${ARTIFACTS}/vmlinuz ./matchbox/assets/
  cd ./matchbox/assets
  $PWD/../../../../../${ARTIFACTS}/talosctl-linux-amd64 config generate --install-image ${INSTALLER} integration-test https://kubernetes.talos.dev:6443
  # Serial console + custom CNI tweaks (yq v2 write syntax).
  yq w -i init.yaml machine.install.extraKernelArgs[+] 'console=ttyS0'
  yq w -i init.yaml cluster.network.cni.name 'custom'
  yq w -i init.yaml cluster.network.cni.urls[+] "${CNI_URL}"
  yq w -i controlplane.yaml machine.install.extraKernelArgs[+] 'console=ttyS0'
  yq w -i worker.yaml machine.install.extraKernelArgs[+] 'console=ttyS0'
  cd -
  virt-install --name $CONTROL_PLANE_1_NAME --network=bridge:talos0,model=e1000,mac=$CONTROL_PLANE_1_MAC $COMMON_VIRT_OPTS --boot=hd,network
  virt-install --name $CONTROL_PLANE_2_NAME --network=bridge:talos0,model=e1000,mac=$CONTROL_PLANE_2_MAC $COMMON_VIRT_OPTS --boot=hd,network
  virt-install --name $CONTROL_PLANE_3_NAME --network=bridge:talos0,model=e1000,mac=$CONTROL_PLANE_3_MAC $COMMON_VIRT_OPTS --boot=hd,network
  virt-install --name $WORKER_1_NAME --network=bridge:talos0,model=e1000,mac=$WORKER_1_MAC $COMMON_VIRT_OPTS --boot=hd,network
}

# Destroy and undefine every node, then delete its storage volume.
function down {
  for node in "${NODES[@]}"; do
    virsh destroy $node
  done
  for node in "${NODES[@]}"; do
    virsh undefine $node
  done
  virsh pool-refresh default
  for node in "${NODES[@]}"; do
    virsh vol-delete --pool default $node.qcow2
  done
}

# Enter an interactive container on the talos network (using dnsmasq for
# DNS) with talosctl mounted and TALOSCONFIG/KUBECONFIG preset.
function workspace {
  docker run --rm -it -v $PWD:/workspace -v $PWD/../../../${ARTIFACTS}/talosctl-linux-amd64:/bin/talosctl:ro --network talos --dns 172.28.1.1 -w /workspace/matchbox/assets -e TALOSCONFIG='/workspace/matchbox/assets/talosconfig' -e KUBECONFIG='/workspace/matchbox/assets/kubeconfig' --entrypoint /bin/bash k8s.gcr.io/hyperkube:v1.18.3
}

main "$@"

View File

@ -1,8 +0,0 @@
{
"id": "control-plane-1",
"name": "control-plane-1",
"profile": "init",
"selector": {
"mac": "52:54:00:a1:9c:ae"
}
}

View File

@ -1,8 +0,0 @@
{
"id": "control-plane-2",
"name": "control-plane-2",
"profile": "controlplane",
"selector": {
"mac": "52:54:00:b2:2f:86"
}
}

View File

@ -1,8 +0,0 @@
{
"id": "control-plane-3",
"name": "control-plane-3",
"profile": "controlplane",
"selector": {
"mac": "52:54:00:c3:61:77"
}
}

View File

@ -1,5 +0,0 @@
{
"id": "default",
"name": "default",
"profile": "default"
}

View File

@ -1,22 +0,0 @@
{
"id": "default",
"name": "default",
"boot": {
"kernel": "/assets/vmlinuz",
"initrd": ["/assets/initramfs.xz"],
"args": [
"initrd=initramfs.xz",
"init_on_alloc=1",
"slab_nomerge",
"pti=on",
"ima_template=ima-ng",
"ima_appraise=fix",
"ima_hash=sha512",
"console=tty0",
"console=ttyS0",
"printk.devkmsg=on",
"talos.platform=metal",
"talos.config=http://matchbox.talos.dev:8080/assets/controlplane.yaml"
]
}
}

View File

@ -1,22 +0,0 @@
{
"id": "default",
"name": "default",
"boot": {
"kernel": "/assets/vmlinuz",
"initrd": ["/assets/initramfs.xz"],
"args": [
"initrd=initramfs.xz",
"init_on_alloc=1",
"slab_nomerge",
"pti=on",
"ima_template=ima-ng",
"ima_appraise=fix",
"ima_hash=sha512",
"console=tty0",
"console=ttyS0",
"printk.devkmsg=on",
"talos.platform=metal",
"talos.config=http://matchbox.talos.dev:8080/assets/worker.yaml"
]
}
}

View File

@ -1,22 +0,0 @@
{
"id": "default",
"name": "default",
"boot": {
"kernel": "/assets/vmlinuz",
"initrd": ["/assets/initramfs.xz"],
"args": [
"initrd=initramfs.xz",
"init_on_alloc=1",
"slab_nomerge",
"pti=on",
"ima_template=ima-ng",
"ima_appraise=fix",
"ima_hash=sha512",
"console=tty0",
"console=ttyS0",
"printk.devkmsg=on",
"talos.platform=metal",
"talos.config=http://matchbox.talos.dev:8080/assets/init.yaml"
]
}
}

View File

@ -1,43 +0,0 @@
#!/usr/bin/env bash
# Boots a single Talos VM under QEMU (KVM on Linux, HVF on macOS) using the
# locally built kernel/initramfs and a machine config delivered via ISO.
set -e
# Scratch directory for the generated config ISO and disk image.
: ${TALOS_QEMU_ROOT:="/tmp"}
# Pick the hardware accelerator for the host OS; bail out on anything else.
case $(uname -s) in
Linux*)
ACCEL=kvm
;;
Darwin*)
ACCEL=hvf
;;
*)
exit 1
;;
esac
KERNEL="_out/vmlinuz-amd64"
INITRD="_out/initramfs-amd64.xz"
IMAGE="$TALOS_QEMU_ROOT/rootfs.qcow2"
ISO="$TALOS_QEMU_ROOT/iso/config.iso"
# Generate machine configs and pack them into an ISO labelled 'metal-iso';
# the talos.config=metal-iso kernel arg below points the node at it.
talosctl gen config -o ${TALOS_QEMU_ROOT}/iso qemu https://10.254.0.10
cp ${TALOS_QEMU_ROOT}/iso/init.yaml ${TALOS_QEMU_ROOT}/iso/config.yaml
mkisofs -joliet -rock -volid 'metal-iso' -output ${ISO} ${TALOS_QEMU_ROOT}/iso
# Fresh 8G system disk for the VM.
qemu-img create -f qcow2 ${IMAGE} 8G
# User-mode networking with guest DHCP starting at 10.254.0.10; forwards
# ports 50000 (Talos API) and 6443 (Kubernetes API) to the host.
qemu-system-x86_64 \
-m 2048 \
-accel ${ACCEL} \
-cpu max \
-smp 2 \
-hda ${IMAGE} \
-netdev user,id=talos,ipv4=on,net=10.254.0.0/24,dhcpstart=10.254.0.10,hostfwd=tcp::50000-:50000,hostfwd=tcp::6443-:6443,hostname=master-1 \
-device virtio-net,netdev=talos \
-nographic \
-serial mon:stdio \
-cdrom ${ISO} \
-append "talos.platform=metal init_on_alloc=1 slab_nomerge pti=on printk.devkmsg=on earlyprintk=serial,tty0,keep console=tty0 talos.config=metal-iso" \
-kernel ${KERNEL} \
-initrd ${INITRD}

11
hack/test/tfvars/gcp.jq Normal file
View File

@ -0,0 +1,11 @@
# Maps the generated vars.json (from e2e-gcp-prepare.sh) into the tfvars
# shape consumed by the GCP Terraform config.
# FIX: removed the trailing comma after .gcp_image — jq's object
# construction syntax does not permit trailing commas, so the original
# filter failed to parse.
{
  "project": .project_id,
  "region": .region,
  "zone": .zone,
  "cluster_name": .cluster_name,
  "talos_version_contract": .talos_version_contract,
  "kubernetes_version": .kubernetes_version,
  "control_plane": {
    "image": .gcp_image
  }
}