chore: move csi tests as go test
Move the rook-ceph CSI tests to Go tests. This allows us to add more CSI tests in the future.

Fixes: #9135

Signed-off-by: Noel Georgi <git@frezbo.dev>
parent e4f8cb854f
commit 8fe39eacba
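With this change, CSI test selection moves from the shell helper run_csi_tests into the Go integration-test binary, driven by new -talos.csi* flags. As a rough sketch of the resulting CI entry point (environment values taken from the workflow diff below):

    EXTRA_TEST_ARGS="-talos.csi=rook-ceph" \
    WITH_CONFIG_PATCH='@hack/test/patches/rook-ceph.yaml' \
    SHORT_INTEGRATION_TEST=yes \
    IMAGE_REGISTRY=registry.dev.siderolabs.io \
    sudo -E make e2e-qemu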
.github/renovate.json (9 changes)

@@ -26,6 +26,15 @@
       ],
       "versioningTemplate": "{{#if versioning}}{{versioning}}{{else}}semver{{/if}}"
     },
+    {
+      "fileMatch": [
+        "internal/integration/k8s/constants.go"
+      ],
+      "matchStrings": [
+        "\\/\\/\\s+renovate: datasource=(?<datasource>.*?)(?:\\s+extractVersion=(?<extractVersion>.+?))?(?:\\s+versioning=(?<versioning>.+?))?\\s+depName=(?<depName>.+?)?(?:\\s+registryUrl=(?<registryUrl>.+?))?\\s.*Version\\s+=\\s+\\\"(?<currentValue>.+?)\\\""
+      ],
+      "versioningTemplate": "{{#if versioning}}{{versioning}}{{else}}semver{{/if}}"
+    },
     {
       "fileMatch": [
         "Dockerfile"
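The new rule teaches Renovate to bump Helm chart versions embedded in Go constants. For instance, the matchStrings regex above captures datasource=helm, depName=rook-ceph, the registryUrl, and currentValue=v1.15.0 from an annotated constant like the one added in internal/integration/k8s/constants.go:

    // renovate: datasource=helm versioning=helm depName=rook-ceph registryUrl=https://charts.rook.io/release
    RookCephHelmChartVersion = "v1.15.0"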
.github/workflows/ci.yaml (136 changes)

@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2024-08-14T14:49:10Z by kres 7be2a05.
+# Generated on 2024-08-24T17:38:15Z by kres 8e4bbb4.
 
 name: default
 concurrency:
@@ -994,7 +994,7 @@ jobs:
           IMAGE_REGISTRY: registry.dev.siderolabs.io
           QEMU_WORKERS: "2"
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/cluster/network", "value": {"cni": {"name": "none"}}}]'
+          WITH_CONFIG_PATCH: '@hack/test/patches/cilium-no-kubeproxy.yaml'
           WITH_CUSTOM_CNI: cilium
           WITH_FIREWALL: accept
           WITH_SKIP_K8S_NODE_READINESS_CHECK: "yes"
@@ -1006,7 +1006,7 @@ jobs:
           IMAGE_REGISTRY: registry.dev.siderolabs.io
           QEMU_WORKERS: "2"
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/cluster/network", "value": {"cni": {"name": "none"}}}, {"op": "add", "path": "/cluster/proxy", "value": {"disabled": true}}]'
+          WITH_CONFIG_PATCH: '@hack/test/patches/cilium-kubeproxy.yaml'
           WITH_CUSTOM_CNI: cilium
           WITH_FIREWALL: accept
           WITH_SKIP_K8S_NODE_READINESS_CHECK: "yes"
@@ -1018,7 +1018,7 @@ jobs:
           IMAGE_REGISTRY: registry.dev.siderolabs.io
           QEMU_WORKERS: "2"
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/cluster/network", "value": {"cni": {"name": "none"}}}, {"op": "add", "path": "/cluster/proxy", "value": {"disabled": true}}]'
+          WITH_CONFIG_PATCH: '@hack/test/patches/cilium-kubeproxy.yaml'
           WITH_CUSTOM_CNI: cilium
           WITH_FIREWALL: accept
           WITH_KUBESPAN: "true"
@@ -1428,7 +1428,7 @@ jobs:
           QEMU_EXTRA_DISKS: "3"
           QEMU_MEMORY_WORKERS: "4096"
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_CONFIG_PATCH_WORKER: '@_out/extensions-patch.yaml'
+          WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml'
         run: |
           sudo -E make e2e-qemu
       - name: save artifacts
@@ -1989,7 +1989,7 @@ jobs:
         env:
           IMAGE_REGISTRY: registry.dev.siderolabs.io
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/machine/install/extraKernelArgs/-", "value": "talos.unified_cgroup_hierarchy=0"}]'
+          WITH_CONFIG_PATCH: '@hack/test/patches/cgroupsv1.yaml'
           WITH_UEFI: "false"
         run: |
           sudo -E make e2e-qemu
@@ -2562,7 +2562,7 @@ jobs:
             ~/.talos/clusters/**/*.log
             !~/.talos/clusters/**/swtpm.log
           retention-days: "5"
-  integration-qemu-csi:
+  integration-qemu-csi-longhorn:
     permissions:
       actions: read
       contents: write
@@ -2572,7 +2572,7 @@ jobs:
     runs-on:
      - self-hosted
      - talos
-    if: contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi')
+    if: contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi') || contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi-longhorn')
     needs:
      - default
     steps:
@@ -2635,8 +2635,122 @@ jobs:
         if: github.event_name == 'schedule'
         run: |
           make talosctl-cni-bundle
-      - name: e2e-qemu-csi
+      - name: checkout extensions
+        uses: actions/checkout@v4
+        with:
+          path: _out/extensions
+          ref: main
+          repository: siderolabs/extensions
+      - name: set variables
+        run: |
+          cat _out/talos-metadata >> "$GITHUB_ENV"
+      - name: build extensions
+        env:
+          PLATFORM: linux/amd64
+          PUSH: "true"
+          REGISTRY: registry.dev.siderolabs.io
+        run: |
+          make iscsi-tools util-linux-tools extensions-metadata -C _out/extensions
+      - name: installer extensions
+        env:
+          EXTENSIONS_FILTER_COMMAND: grep -E 'iscsi-tools|util-linux-tools'
+          IMAGE_REGISTRY: registry.dev.siderolabs.io
+        run: |
+          make installer-with-extensions
+      - name: e2e-qemu-csi-longhorn
+        env:
+          EXTRA_TEST_ARGS: -talos.csi=longhorn
+          IMAGE_REGISTRY: registry.dev.siderolabs.io
+          QEMU_WORKERS: "3"
+          SHORT_INTEGRATION_TEST: "yes"
+          WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml'
+        run: |
+          sudo -E make e2e-qemu
+      - name: save artifacts
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: talos-logs-integration-qemu-csi-longhorn
+          path: |-
+            ~/.talos/clusters/**/*.log
+            !~/.talos/clusters/**/swtpm.log
+          retention-days: "5"
+  integration-qemu-csi-rook-ceph:
+    permissions:
+      actions: read
+      contents: write
+      issues: read
+      packages: write
+      pull-requests: read
+    runs-on:
+      - self-hosted
+      - talos
+    if: contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi') || contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi-rook-ceph')
+    needs:
+      - default
+    steps:
+      - name: gather-system-info
+        id: system-info
+        uses: kenchan0130/actions-system-info@v1.3.0
+        continue-on-error: true
+      - name: print-system-info
+        run: |
+          MEMORY_GB=$((${{ steps.system-info.outputs.totalmem }}/1024/1024/1024))
+
+          OUTPUTS=(
+            "CPU Core: ${{ steps.system-info.outputs.cpu-core }}"
+            "CPU Model: ${{ steps.system-info.outputs.cpu-model }}"
+            "Hostname: ${{ steps.system-info.outputs.hostname }}"
+            "NodeName: ${NODE_NAME}"
+            "Kernel release: ${{ steps.system-info.outputs.kernel-release }}"
+            "Kernel version: ${{ steps.system-info.outputs.kernel-version }}"
+            "Name: ${{ steps.system-info.outputs.name }}"
+            "Platform: ${{ steps.system-info.outputs.platform }}"
+            "Release: ${{ steps.system-info.outputs.release }}"
+            "Total memory: ${MEMORY_GB} GB"
+          )
+
+          for OUTPUT in "${OUTPUTS[@]}";do
+            echo "${OUTPUT}"
+          done
+        continue-on-error: true
+      - name: checkout
+        uses: actions/checkout@v4
+      - name: Unshallow
+        run: |
+          git fetch --prune --unshallow
+      - name: Set up Docker Buildx
+        id: setup-buildx
+        uses: docker/setup-buildx-action@v3
+        with:
+          driver: remote
+          endpoint: tcp://buildkit-amd64.ci.svc.cluster.local:1234
+        timeout-minutes: 10
+      - name: Download artifacts
+        if: github.event_name != 'schedule'
+        uses: actions/download-artifact@v4
+        with:
+          name: talos-artifacts
+          path: _out
+      - name: Fix artifact permissions
+        if: github.event_name != 'schedule'
+        run: |
+          xargs -a _out/executable-artifacts -I {} chmod +x {}
+      - name: build
+        if: github.event_name == 'schedule'
+        env:
+          IMAGE_REGISTRY: registry.dev.siderolabs.io
+          PLATFORM: linux/amd64
+          PUSH: "true"
+        run: |
+          make talosctl-linux-amd64 kernel sd-boot sd-stub initramfs installer imager talos _out/integration-test-linux-amd64
+      - name: talosctl-cni-bundle
+        if: github.event_name == 'schedule'
+        run: |
+          make talosctl-cni-bundle
+      - name: e2e-qemu-csi-rook-ceph
         env:
+          EXTRA_TEST_ARGS: -talos.csi=rook-ceph
           IMAGE_REGISTRY: registry.dev.siderolabs.io
           QEMU_CPUS_WORKERS: "4"
           QEMU_EXTRA_DISKS: "1"
@@ -2644,14 +2758,14 @@ jobs:
           QEMU_MEMORY_WORKERS: "5120"
           QEMU_WORKERS: "3"
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_TEST: run_csi_tests
+          WITH_CONFIG_PATCH: '@hack/test/patches/rook-ceph.yaml'
         run: |
           sudo -E make e2e-qemu
       - name: save artifacts
         if: always()
         uses: actions/upload-artifact@v4
         with:
-          name: talos-logs-integration-qemu-csi
+          name: talos-logs-integration-qemu-csi-rook-ceph
           path: |-
             ~/.talos/clusters/**/*.log
             !~/.talos/clusters/**/swtpm.log
.github/workflows/integration-cilium-cron.yaml

@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2024-05-27T16:20:10Z by kres bcb280a.
+# Generated on 2024-08-24T17:38:15Z by kres 8e4bbb4.
 
 name: integration-cilium-cron
 concurrency:
@@ -79,7 +79,7 @@ jobs:
           IMAGE_REGISTRY: registry.dev.siderolabs.io
           QEMU_WORKERS: "2"
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/cluster/network", "value": {"cni": {"name": "none"}}}]'
+          WITH_CONFIG_PATCH: '@hack/test/patches/cilium-no-kubeproxy.yaml'
           WITH_CUSTOM_CNI: cilium
           WITH_FIREWALL: accept
           WITH_SKIP_K8S_NODE_READINESS_CHECK: "yes"
@@ -91,7 +91,7 @@ jobs:
           IMAGE_REGISTRY: registry.dev.siderolabs.io
           QEMU_WORKERS: "2"
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/cluster/network", "value": {"cni": {"name": "none"}}}, {"op": "add", "path": "/cluster/proxy", "value": {"disabled": true}}]'
+          WITH_CONFIG_PATCH: '@hack/test/patches/cilium-kubeproxy.yaml'
           WITH_CUSTOM_CNI: cilium
           WITH_FIREWALL: accept
           WITH_SKIP_K8S_NODE_READINESS_CHECK: "yes"
@@ -103,7 +103,7 @@ jobs:
           IMAGE_REGISTRY: registry.dev.siderolabs.io
           QEMU_WORKERS: "2"
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/cluster/network", "value": {"cni": {"name": "none"}}}, {"op": "add", "path": "/cluster/proxy", "value": {"disabled": true}}]'
+          WITH_CONFIG_PATCH: '@hack/test/patches/cilium-kubeproxy.yaml'
           WITH_CUSTOM_CNI: cilium
           WITH_FIREWALL: accept
           WITH_KUBESPAN: "true"
.github/workflows/integration-extensions-cron.yaml

@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2024-05-27T16:20:10Z by kres bcb280a.
+# Generated on 2024-08-24T17:38:15Z by kres 8e4bbb4.
 
 name: integration-extensions-cron
 concurrency:
@@ -109,7 +109,7 @@ jobs:
           QEMU_EXTRA_DISKS: "3"
           QEMU_MEMORY_WORKERS: "4096"
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_CONFIG_PATCH_WORKER: '@_out/extensions-patch.yaml'
+          WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml'
         run: |
           sudo -E make e2e-qemu
       - name: save artifacts
.github/workflows/integration-misc-2-cron.yaml

@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2024-05-27T16:20:10Z by kres bcb280a.
+# Generated on 2024-08-24T17:38:15Z by kres 8e4bbb4.
 
 name: integration-misc-2-cron
 concurrency:
@@ -91,7 +91,7 @@ jobs:
         env:
           IMAGE_REGISTRY: registry.dev.siderolabs.io
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/machine/install/extraKernelArgs/-", "value": "talos.unified_cgroup_hierarchy=0"}]'
+          WITH_CONFIG_PATCH: '@hack/test/patches/cgroupsv1.yaml'
           WITH_UEFI: "false"
         run: |
           sudo -E make e2e-qemu
.github/workflows/integration-qemu-csi-longhorn-cron.yaml (new file, 116 lines)

# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-08-24T17:38:15Z by kres 8e4bbb4.

name: integration-qemu-csi-longhorn-cron
concurrency:
  group: ${{ github.head_ref || github.run_id }}
  cancel-in-progress: true
"on":
  schedule:
    - cron: 30 3 * * *
jobs:
  default:
    runs-on:
      - self-hosted
      - talos
    steps:
      - name: gather-system-info
        id: system-info
        uses: kenchan0130/actions-system-info@v1.3.0
        continue-on-error: true
      - name: print-system-info
        run: |
          MEMORY_GB=$((${{ steps.system-info.outputs.totalmem }}/1024/1024/1024))

          OUTPUTS=(
            "CPU Core: ${{ steps.system-info.outputs.cpu-core }}"
            "CPU Model: ${{ steps.system-info.outputs.cpu-model }}"
            "Hostname: ${{ steps.system-info.outputs.hostname }}"
            "NodeName: ${NODE_NAME}"
            "Kernel release: ${{ steps.system-info.outputs.kernel-release }}"
            "Kernel version: ${{ steps.system-info.outputs.kernel-version }}"
            "Name: ${{ steps.system-info.outputs.name }}"
            "Platform: ${{ steps.system-info.outputs.platform }}"
            "Release: ${{ steps.system-info.outputs.release }}"
            "Total memory: ${MEMORY_GB} GB"
          )

          for OUTPUT in "${OUTPUTS[@]}";do
            echo "${OUTPUT}"
          done
        continue-on-error: true
      - name: checkout
        uses: actions/checkout@v4
      - name: Unshallow
        run: |
          git fetch --prune --unshallow
      - name: Set up Docker Buildx
        id: setup-buildx
        uses: docker/setup-buildx-action@v3
        with:
          driver: remote
          endpoint: tcp://buildkit-amd64.ci.svc.cluster.local:1234
        timeout-minutes: 10
      - name: Download artifacts
        if: github.event_name != 'schedule'
        uses: actions/download-artifact@v4
        with:
          name: talos-artifacts
          path: _out
      - name: Fix artifact permissions
        if: github.event_name != 'schedule'
        run: |
          xargs -a _out/executable-artifacts -I {} chmod +x {}
      - name: build
        if: github.event_name == 'schedule'
        env:
          IMAGE_REGISTRY: registry.dev.siderolabs.io
          PLATFORM: linux/amd64
          PUSH: "true"
        run: |
          make talosctl-linux-amd64 kernel sd-boot sd-stub initramfs installer imager talos _out/integration-test-linux-amd64
      - name: talosctl-cni-bundle
        if: github.event_name == 'schedule'
        run: |
          make talosctl-cni-bundle
      - name: checkout extensions
        uses: actions/checkout@v4
        with:
          path: _out/extensions
          ref: main
          repository: siderolabs/extensions
      - name: set variables
        run: |
          cat _out/talos-metadata >> "$GITHUB_ENV"
      - name: build extensions
        env:
          PLATFORM: linux/amd64
          PUSH: "true"
          REGISTRY: registry.dev.siderolabs.io
        run: |
          make iscsi-tools util-linux-tools extensions-metadata -C _out/extensions
      - name: installer extensions
        env:
          EXTENSIONS_FILTER_COMMAND: grep -E 'iscsi-tools|util-linux-tools'
          IMAGE_REGISTRY: registry.dev.siderolabs.io
        run: |
          make installer-with-extensions
      - name: e2e-qemu-csi-longhorn
        env:
          EXTRA_TEST_ARGS: -talos.csi=longhorn
          IMAGE_REGISTRY: registry.dev.siderolabs.io
          QEMU_WORKERS: "3"
          SHORT_INTEGRATION_TEST: "yes"
          WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml'
        run: |
          sudo -E make e2e-qemu
      - name: save artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: talos-logs-integration-qemu-csi-longhorn
          path: |-
            ~/.talos/clusters/**/*.log
            !~/.talos/clusters/**/swtpm.log
          retention-days: "5"
.github/workflows/integration-qemu-csi-cron.yaml → .github/workflows/integration-qemu-csi-rook-ceph-cron.yaml

@@ -1,8 +1,8 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2024-05-27T16:20:10Z by kres bcb280a.
+# Generated on 2024-08-24T17:38:15Z by kres 8e4bbb4.
 
-name: integration-qemu-csi-cron
+name: integration-qemu-csi-rook-ceph-cron
 concurrency:
   group: ${{ github.head_ref || github.run_id }}
   cancel-in-progress: true
@@ -74,8 +74,9 @@ jobs:
         if: github.event_name == 'schedule'
         run: |
           make talosctl-cni-bundle
-      - name: e2e-qemu-csi
+      - name: e2e-qemu-csi-rook-ceph
         env:
+          EXTRA_TEST_ARGS: -talos.csi=rook-ceph
           IMAGE_REGISTRY: registry.dev.siderolabs.io
           QEMU_CPUS_WORKERS: "4"
           QEMU_EXTRA_DISKS: "1"
@@ -83,14 +84,14 @@ jobs:
           QEMU_MEMORY_WORKERS: "5120"
           QEMU_WORKERS: "3"
           SHORT_INTEGRATION_TEST: "yes"
-          WITH_TEST: run_csi_tests
+          WITH_CONFIG_PATCH: '@hack/test/patches/rook-ceph.yaml'
         run: |
           sudo -E make e2e-qemu
       - name: save artifacts
         if: always()
         uses: actions/upload-artifact@v4
         with:
-          name: talos-logs-integration-qemu-csi
+          name: talos-logs-integration-qemu-csi-rook-ceph
           path: |-
             ~/.talos/clusters/**/*.log
             !~/.talos/clusters/**/swtpm.log
.github/workflows/slack-notify.yaml (5 changes)

@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2024-06-11T15:17:44Z by kres 7360563.
+# Generated on 2024-08-24T17:38:15Z by kres 8e4bbb4.
 
 name: slack-notify
 "on":
@@ -22,7 +22,8 @@ name: slack-notify
       - integration-cilium-cron
       - integration-qemu-encrypted-vip-cron
       - integration-qemu-race-cron
-      - integration-qemu-csi-cron
+      - integration-qemu-csi-rook-ceph-cron
+      - integration-qemu-csi-longhorn-cron
       - integration-images-cron
       - integration-reproducibility-test-cron
       - integration-cloud-images-cron
.kres.yaml (96 changes)

@@ -67,7 +67,8 @@ spec:
       - integration-conformance
       - integration-qemu-encrypted-vip
       - integration-qemu-race
-      - integration-qemu-csi
+      - integration-qemu-csi-rook-ceph
+      - integration-qemu-csi-longhorn
       - integration-images
       - integration-reproducibility-test
       - integration-cloud-images
@@ -767,7 +768,7 @@ spec:
         environment:
           SHORT_INTEGRATION_TEST: yes
           WITH_UEFI: false
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/machine/install/extraKernelArgs/-", "value": "talos.unified_cgroup_hierarchy=0"}]' #use cgroupsv1
+          WITH_CONFIG_PATCH: "@hack/test/patches/cgroupsv1.yaml" #use cgroupsv1
           IMAGE_REGISTRY: registry.dev.siderolabs.io
       - name: e2e-disk-image
         command: e2e-qemu
@@ -965,7 +966,7 @@ spec:
         withSudo: true
         environment:
           QEMU_MEMORY_WORKERS: 4096
-          WITH_CONFIG_PATCH_WORKER: "@_out/extensions-patch.yaml"
+          WITH_CONFIG_PATCH_WORKER: "@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml"
           QEMU_EXTRA_DISKS: 3
           SHORT_INTEGRATION_TEST: yes
           EXTRA_TEST_ARGS: -talos.extensions.qemu
@@ -1020,7 +1021,7 @@ spec:
           WITH_CUSTOM_CNI: cilium
           WITH_FIREWALL: accept
           QEMU_WORKERS: 2
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/cluster/network", "value": {"cni": {"name": "none"}}}]'
+          WITH_CONFIG_PATCH: "@hack/test/patches/cilium-no-kubeproxy.yaml"
           IMAGE_REGISTRY: registry.dev.siderolabs.io
       - name: e2e-cilium-strict
         command: e2e-qemu
@@ -1032,7 +1033,7 @@ spec:
           WITH_FIREWALL: accept
           QEMU_WORKERS: 2
           CILIUM_INSTALL_TYPE: strict
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/cluster/network", "value": {"cni": {"name": "none"}}}, {"op": "add", "path": "/cluster/proxy", "value": {"disabled": true}}]'
+          WITH_CONFIG_PATCH: "@hack/test/patches/cilium-kubeproxy.yaml"
           IMAGE_REGISTRY: registry.dev.siderolabs.io
       - name: e2e-cilium-strict-kubespan
         command: e2e-qemu
@@ -1045,7 +1046,7 @@ spec:
           WITH_KUBESPAN: true
           QEMU_WORKERS: 2
           CILIUM_INSTALL_TYPE: strict
-          WITH_CONFIG_PATCH: '[{"op": "add", "path": "/cluster/network", "value": {"cni": {"name": "none"}}}, {"op": "add", "path": "/cluster/proxy", "value": {"disabled": true}}]'
+          WITH_CONFIG_PATCH: "@hack/test/patches/cilium-kubeproxy.yaml"
           IMAGE_REGISTRY: registry.dev.siderolabs.io
       - name: save-talos-logs
         conditions:
@@ -1160,7 +1161,7 @@ spec:
         artifactPath: ~/.talos/clusters/**/*.log
         additionalArtifacts:
           - "!~/.talos/clusters/**/swtpm.log"
-  - name: integration-qemu-csi
+  - name: integration-qemu-csi-rook-ceph
     buildxOptions:
       enabled: true
     depends:
@@ -1172,6 +1173,7 @@ spec:
       - '30 3 * * *'
     triggerLabels:
       - integration/qemu-csi
+      - integration/qemu-csi-rook-ceph
     steps:
       - name: download-artifacts
        conditions:
@@ -1191,7 +1193,7 @@ spec:
       - name: talosctl-cni-bundle
         conditions:
           - only-on-schedule
-      - name: e2e-qemu-csi
+      - name: e2e-qemu-csi-rook-ceph
         command: e2e-qemu
         withSudo: true
         environment:
@@ -1201,14 +1203,88 @@ spec:
           QEMU_MEMORY_WORKERS: 5120
           QEMU_EXTRA_DISKS: 1
           QEMU_EXTRA_DISKS_SIZE: 12288
-          WITH_TEST: run_csi_tests
+          WITH_CONFIG_PATCH: "@hack/test/patches/rook-ceph.yaml"
+          EXTRA_TEST_ARGS: -talos.csi=rook-ceph
           IMAGE_REGISTRY: registry.dev.siderolabs.io
       - name: save-talos-logs
         conditions:
           - always
         artifactStep:
           type: upload
-          artifactName: talos-logs-integration-qemu-csi
+          artifactName: talos-logs-integration-qemu-csi-rook-ceph
           disableExecutableListGeneration: true
           artifactPath: ~/.talos/clusters/**/*.log
           additionalArtifacts:
             - "!~/.talos/clusters/**/swtpm.log"
+  - name: integration-qemu-csi-longhorn
+    buildxOptions:
+      enabled: true
+    depends:
+      - default
+    runners:
+      - self-hosted
+      - talos
+    crons:
+      - '30 3 * * *'
+    triggerLabels:
+      - integration/qemu-csi
+      - integration/qemu-csi-longhorn
+    steps:
+      - name: download-artifacts
+        conditions:
+          - not-on-schedule
+        artifactStep:
+          type: download
+          artifactName: talos-artifacts
+          artifactPath: _out
+      - name: build
+        conditions:
+          - only-on-schedule
+        command: talosctl-linux-amd64 kernel sd-boot sd-stub initramfs installer imager talos _out/integration-test-linux-amd64
+        environment:
+          PLATFORM: linux/amd64
+          IMAGE_REGISTRY: registry.dev.siderolabs.io
+          PUSH: true
+      - name: talosctl-cni-bundle
+        conditions:
+          - only-on-schedule
+      - name: checkout extensions
+        checkoutStep:
+          repository: siderolabs/extensions
+          ref: main
+          path: _out/extensions
+      - name: set variables
+        nonMakeStep: true
+        command: cat _out/talos-metadata >> "$GITHUB_ENV"
+      - name: build extensions
+        command: iscsi-tools util-linux-tools extensions-metadata
+        arguments:
+          - -C
+          - _out/extensions
+        environment:
+          PLATFORM: linux/amd64
+          PUSH: true
+          REGISTRY: registry.dev.siderolabs.io
+      - name: installer extensions
+        command: installer-with-extensions
+        environment:
+          EXTENSIONS_FILTER_COMMAND: "grep -E 'iscsi-tools|util-linux-tools'"
+          IMAGE_REGISTRY: registry.dev.siderolabs.io
+      - name: e2e-qemu-csi-longhorn
+        command: e2e-qemu
+        withSudo: true
+        environment:
+          SHORT_INTEGRATION_TEST: yes
+          QEMU_WORKERS: 3
+          WITH_CONFIG_PATCH: "@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml"
+          EXTRA_TEST_ARGS: -talos.csi=longhorn
+          IMAGE_REGISTRY: registry.dev.siderolabs.io
+      - name: save-talos-logs
+        conditions:
+          - always
+        artifactStep:
+          type: upload
+          artifactName: talos-logs-integration-qemu-csi-longhorn
+          disableExecutableListGeneration: true
+          artifactPath: ~/.talos/clusters/**/*.log
+          additionalArtifacts:
+            - "!~/.talos/clusters/**/swtpm.log"
Makefile (6 changes)

@@ -226,6 +226,8 @@ COMMON_ARGS += --build-arg=MICROSOFT_SECUREBOOT_RELEASE=$(MICROSOFT_SECUREBOOT_R
 
 CI_ARGS ?=
 
+EXTENSIONS_FILTER_COMMAND ?= "grep -vE 'tailscale|xen-guest-agent|nvidia|vmtoolsd-guest-agent'"
+
 all: initramfs kernel installer imager talosctl talosctl-image talos
 
 # Help Menu
@@ -542,9 +544,9 @@ provision-tests-track-%:
 
 installer-with-extensions: $(ARTIFACTS)/extensions/_out/extensions-metadata
 	$(MAKE) image-installer \
-		IMAGER_ARGS="--base-installer-image=$(REGISTRY_AND_USERNAME)/installer:$(IMAGE_TAG) $(shell cat $(ARTIFACTS)/extensions/_out/extensions-metadata | grep -vE 'tailscale|xen-guest-agent|nvidia|vmtoolsd-guest-agent' | xargs -n 1 echo --system-extension-image)"
+		IMAGER_ARGS="--base-installer-image=$(REGISTRY_AND_USERNAME)/installer:$(IMAGE_TAG) $(shell cat $(ARTIFACTS)/extensions/_out/extensions-metadata | $(EXTENSIONS_FILTER_COMMAND) | xargs -n 1 echo --system-extension-image)"
 	crane push $(ARTIFACTS)/installer-amd64.tar $(REGISTRY_AND_USERNAME)/installer:$(IMAGE_TAG)-amd64-extensions
-	echo -n "$(REGISTRY_AND_USERNAME)/installer:$(IMAGE_TAG)-amd64-extensions" | jq -Rs -f hack/test/extensions/extension-patch-filter.jq | yq eval ".[] | split_doc" -P > $(ARTIFACTS)/extensions-patch.yaml
+	INSTALLER_IMAGE_EXTENSIONS="$(REGISTRY_AND_USERNAME)/installer:$(IMAGE_TAG)-amd64-extensions" yq eval -n '.machine.install.image = strenv(INSTALLER_IMAGE_EXTENSIONS)' > $(ARTIFACTS)/installer-extensions-patch.yaml
 
 # Assets for releases
hack/test/e2e-qemu.sh

@@ -116,7 +116,11 @@ case "${WITH_CONFIG_PATCH:-false}" in
   false)
     ;;
   *)
-    QEMU_FLAGS+=("--config-patch=${WITH_CONFIG_PATCH}")
+    [[ ! ${WITH_CONFIG_PATCH} =~ ^@ ]] && echo "WITH_CONFIG_PATCH variable should start with @" && exit 1
+
+    for i in ${WITH_CONFIG_PATCH//:/ }; do
+      QEMU_FLAGS+=("--config-patch=${i}")
+    done
     ;;
 esac
 
@@ -124,7 +128,11 @@ case "${WITH_CONFIG_PATCH_WORKER:-false}" in
   false)
     ;;
   *)
-    QEMU_FLAGS+=("--config-patch-worker=${WITH_CONFIG_PATCH_WORKER}")
+    [[ ! ${WITH_CONFIG_PATCH_WORKER} =~ ^@ ]] && echo "WITH_CONFIG_PATCH_WORKER variable should start with @" && exit 1
+
+    for i in ${WITH_CONFIG_PATCH_WORKER//:/ }; do
+      QEMU_FLAGS+=("--config-patch-worker=${i}")
+    done
     ;;
 esac
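WITH_CONFIG_PATCH (and its worker variant) now rejects inline JSON and instead accepts one or more @file patches separated by colons, which the loop above splits into repeated --config-patch flags. For example, using paths from this commit's workflows:

    WITH_CONFIG_PATCH='@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml' \
    sudo -E make e2e-qemu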
hack/test/e2e.sh

@@ -149,7 +149,19 @@ function run_talos_integration_test {
     ;;
   esac
 
-  "${INTEGRATION_TEST}" -test.v -talos.failfast -talos.talosctlpath "${TALOSCTL}" -talos.kubectlpath "${KUBECTL}" -talos.provisioner "${PROVISIONER}" -talos.name "${CLUSTER_NAME}" -talos.image "${REGISTRY}/siderolabs/talos" "${EXTRA_TEST_ARGS[@]}" "${TEST_RUN[@]}" "${TEST_SHORT[@]}"
+  "${INTEGRATION_TEST}" \
+    -test.v \
+    -talos.failfast \
+    -talos.talosctlpath "${TALOSCTL}" \
+    -talos.kubectlpath "${KUBECTL}" \
+    -talos.helmpath "${HELM}" \
+    -talos.kubestrpath "${KUBESTR}" \
+    -talos.provisioner "${PROVISIONER}" \
+    -talos.name "${CLUSTER_NAME}" \
+    -talos.image "${REGISTRY}/siderolabs/talos" \
+    "${EXTRA_TEST_ARGS[@]}" \
+    "${TEST_RUN[@]}" \
+    "${TEST_SHORT[@]}"
 }
 
 function run_talos_integration_test_docker {
@@ -169,7 +181,18 @@ function run_talos_integration_test_docker {
     ;;
   esac
 
-  "${INTEGRATION_TEST}" -test.v -talos.talosctlpath "${TALOSCTL}" -talos.kubectlpath "${KUBECTL}" -talos.provisioner "${PROVISIONER}" -talos.name "${CLUSTER_NAME}" -talos.image "${REGISTRY}/siderolabs/talos" "${EXTRA_TEST_ARGS[@]}" "${TEST_RUN[@]}" "${TEST_SHORT[@]}"
+  "${INTEGRATION_TEST}" \
+    -test.v \
+    -talos.talosctlpath "${TALOSCTL}" \
+    -talos.kubectlpath "${KUBECTL}" \
+    -talos.helmpath "${HELM}" \
+    -talos.kubestrpath "${KUBESTR}" \
+    -talos.provisioner "${PROVISIONER}" \
+    -talos.name "${CLUSTER_NAME}" \
+    -talos.image "${REGISTRY}/siderolabs/talos" \
+    "${EXTRA_TEST_ARGS[@]}" \
+    "${TEST_RUN[@]}" \
+    "${TEST_SHORT[@]}"
 }
 
 function run_kubernetes_conformance_test {
@@ -220,24 +243,6 @@ function build_registry_mirrors {
   fi
 }
 
-function run_csi_tests {
-  ${HELM} repo add rook-release https://charts.rook.io/release
-  ${HELM} repo update
-  ${HELM} upgrade --install --version=v1.8.2 --set=pspEnable=false --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph
-  ${HELM} upgrade --install --version=v1.8.2 --set=pspEnable=false --create-namespace --namespace rook-ceph rook-ceph-cluster rook-release/rook-ceph-cluster
-
-  ${KUBECTL} label ns rook-ceph pod-security.kubernetes.io/enforce=privileged
-  # wait for the controller to populate the status field
-  sleep 30
-  ${KUBECTL} --namespace rook-ceph wait --timeout=900s --for=jsonpath='{.status.phase}=Ready' cephclusters.ceph.rook.io/rook-ceph
-  ${KUBECTL} --namespace rook-ceph wait --timeout=900s --for=jsonpath='{.status.state}=Created' cephclusters.ceph.rook.io/rook-ceph
-  # .status.ceph is populated later only
-  sleep 60
-  ${KUBECTL} --namespace rook-ceph wait --timeout=900s --for=jsonpath='{.status.ceph.health}=HEALTH_OK' cephclusters.ceph.rook.io/rook-ceph
-  # hack until https://github.com/kastenhq/kubestr/issues/101 is addressed
-  KUBERNETES_SERVICE_HOST="" KUBECONFIG="${TMP}/kubeconfig" "${KUBESTR}" fio --storageclass ceph-block --size 10G
-}
-
 function install_and_run_cilium_cni_tests {
   get_kubeconfig
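The expanded invocation wires the new helm and kubestr binaries through to the Go test runner; run directly, the equivalent is roughly the following (paths illustrative, the CSI flags normally arrive via EXTRA_TEST_ARGS):

    _out/integration-test-linux-amd64 -test.v \
      -talos.talosctlpath _out/talosctl-linux-amd64 \
      -talos.helmpath helm \
      -talos.kubestrpath kubestr \
      -talos.csi=rook-ceph -talos.csi.timeout=15m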
hack/test/patches/cgroupsv1.yaml (new file, 4 lines)

machine:
  install:
    extraKernelArgs:
      - talos.unified_cgroup_hierarchy=0
hack/test/patches/cilium-kubeproxy.yaml (new file, 6 lines)

cluster:
  network:
    cni:
      name: none
  proxy:
    disabled: true
hack/test/patches/cilium-no-kubeproxy.yaml (new file, 4 lines)

cluster:
  network:
    cni:
      name: none
hack/test/patches/extensions.yaml (new file, 47 lines)

machine:
  sysctls:
    user.max_user_namespaces: "11255"
  kernel:
    modules:
      - name: asix
      - name: ax88179_178a
      - name: ax88796b
      - name: binfmt_misc
      - name: btrfs
      - name: cdc_ether
      - name: cdc_mbim
      - name: cdc_ncm
      - name: cdc_subset
      - name: cdc_wdm
      - name: cxgb
      - name: cxgb3
      - name: cxgb4
      - name: cxgb4vf
      - name: drbd
      - name: gasket
      - name: net1080
      - name: option
      - name: qmi_wwan
      - name: r8153_ecm
      - name: thunderbolt
      - name: thunderbolt_net
      - name: usb_wwan
      - name: usbnet
      - name: usbserial
      - name: zaurus
      - name: zfs
---
apiVersion: v1alpha1
kind: ExtensionServiceConfig
name: tailscale
environment:
  - TS_AUTHKEY=tskey-0000000000000000
---
apiVersion: v1alpha1
kind: ExtensionServiceConfig
name: nut-client
configFiles:
  - content: |-
      MONITOR ${upsmonHost} 1 remote ${upsmonPasswd} slave
      SHUTDOWNCMD "/sbin/poweroff"
    mountPath: /usr/local/etc/nut/upsmon.conf
hack/test/patches/longhorn.yaml (new file, 18 lines)

machine:
  kubelet:
    extraMounts:
      - destination: /var/lib/longhorn
        type: bind
        source: /var/lib/longhorn
        options:
          - bind
          - rshared
          - rw
cluster:
  apiServer:
    admissionControl:
      - name: PodSecurity
        configuration:
          exemptions:
            namespaces:
              - longhorn-system
hack/test/patches/rook-ceph.yaml (new file, 8 lines)

cluster:
  apiServer:
    admissionControl:
      - name: PodSecurity
        configuration:
          exemptions:
            namespaces:
              - rook-ceph
internal/integration/base/base.go

@@ -33,6 +33,10 @@ type TalosSuite struct {
 	TalosctlPath string
 	// KubectlPath is a path to kubectl binary
 	KubectlPath string
+	// HelmPath is a path to helm binary
+	HelmPath string
+	// KubeStrPath is a path to kubestr binary
+	KubeStrPath string
 	// ExtensionsQEMU runs tests with qemu and extensions enabled
 	ExtensionsQEMU bool
 	// ExtensionsNvidia runs tests with nvidia extensions enabled
@@ -41,6 +45,10 @@ type TalosSuite struct {
 	TrustedBoot bool
 	// TalosImage is the image name for 'talos' container.
 	TalosImage string
+	// CSITestName is the name of the CSI test to run
+	CSITestName string
+	// CSITestTimeout is the timeout for the CSI test
+	CSITestTimeout string
 
 	discoveredNodes cluster.Info
 }
internal/integration/base/k8s.go

@@ -13,6 +13,9 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"os"
+	"os/exec"
+	"path/filepath"
 	"slices"
 	"strings"
 	"time"
@@ -40,6 +43,7 @@ import (
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 	"k8s.io/client-go/tools/remotecommand"
 	watchtools "k8s.io/client-go/tools/watch"
+	"k8s.io/client-go/util/jsonpath"
 	"k8s.io/kubectl/pkg/scheme"
 
 	taloskubernetes "github.com/siderolabs/talos/pkg/kubernetes"
@@ -278,6 +282,163 @@ func (k8sSuite *K8sSuite) WaitForPodToBeDeleted(ctx context.Context, timeout tim
 	}
 }
 
+// HelmInstall installs the Helm chart with the given namespace, repository, version, release name, chart name and values.
+func (k8sSuite *K8sSuite) HelmInstall(ctx context.Context, namespace, repository, version, releaseName, chartName string, valuesBytes []byte) error {
+	tempFile := filepath.Join(k8sSuite.T().TempDir(), "values.yaml")
+
+	if err := os.WriteFile(tempFile, valuesBytes, 0o644); err != nil {
+		return err
+	}
+
+	defer os.Remove(tempFile) //nolint:errcheck
+
+	args := []string{
+		"upgrade",
+		"--install",
+		"--cleanup-on-fail",
+		"--create-namespace",
+		"--namespace",
+		namespace,
+		"--wait",
+		"--timeout",
+		k8sSuite.CSITestTimeout,
+		"--repo",
+		repository,
+		"--version",
+		version,
+		"--values",
+		tempFile,
+		releaseName,
+		chartName,
+	}
+
+	cmd := exec.Command(k8sSuite.HelmPath, args...)
+
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	k8sSuite.T().Logf("running helm command: %s", strings.Join(cmd.Args, " "))
+
+	return cmd.Run()
+}
+
+// WaitForResource waits for the resource with the given group, kind, version, namespace and jsonpath field selector to have the given expected value.
+// mostly a restructuring of `kubectl wait` from https://github.com/kubernetes/kubectl/blob/master/pkg/cmd/wait/wait.go
+//
+//nolint:gocyclo
+func (k8sSuite *K8sSuite) WaitForResource(ctx context.Context, namespace, group, kind, version, resourceName, jsonPathSelector, expectedValue string) error {
+	j := jsonpath.New("wait").AllowMissingKeys(true)
+
+	if jsonPathSelector == "" {
+		return fmt.Errorf("jsonpath condition is empty")
+	}
+
+	if err := j.Parse(jsonPathSelector); err != nil {
+		return fmt.Errorf("error parsing jsonpath condition: %v", err)
+	}
+
+	mapping, err := k8sSuite.Mapper.RESTMapping(schema.GroupKind{
+		Group: group,
+		Kind:  kind,
+	}, version)
+	if err != nil {
+		return fmt.Errorf("error creating mapping for resource %s/%s/%s", group, kind, version)
+	}
+
+	dr := k8sSuite.DynamicClient.Resource(mapping.Resource).Namespace(namespace)
+
+	fieldSelector := fields.OneTermEqualSelector("metadata.name", resourceName).String()
+
+	lw := &cache.ListWatch{
+		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+			options.FieldSelector = fieldSelector
+
+			return dr.List(ctx, options)
+		},
+		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+			options.FieldSelector = fieldSelector
+
+			return dr.Watch(ctx, options)
+		},
+	}
+
+	preconditionFunc := func(store cache.Store) (bool, error) {
+		var exists bool
+
+		_, exists, err = store.Get(&metav1.ObjectMeta{Namespace: namespace, Name: resourceName})
+		if err != nil {
+			return true, err
+		}
+
+		if !exists {
+			return true, fmt.Errorf("resource %s/%s/%s/%s not found", group, version, kind, resourceName)
+		}
+
+		return false, nil
+	}
+
+	if _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, preconditionFunc, func(event watch.Event) (bool, error) {
+		obj, ok := event.Object.(*unstructured.Unstructured)
+		if !ok {
+			return false, fmt.Errorf("error converting object to unstructured")
+		}
+
+		queryObj := obj.UnstructuredContent()
+
+		k8sSuite.T().Logf("waiting for resource %s/%s/%s/%s to have field %s with value %s", group, version, kind, resourceName, jsonPathSelector, expectedValue)
+
+		parseResults, err := j.FindResults(queryObj)
+		if err != nil {
+			return false, fmt.Errorf("error finding results: %v", err)
+		}
+
+		if len(parseResults) == 0 || len(parseResults[0]) == 0 {
+			return false, nil
+		}
+
+		if len(parseResults) > 1 {
+			return false, fmt.Errorf("given jsonpath expression matches more than one list")
+		}
+
+		if len(parseResults[0]) > 1 {
+			return false, fmt.Errorf("given jsonpath expression matches more than one value")
+		}
+
+		switch parseResults[0][0].Interface().(type) {
+		case map[string]interface{}, []interface{}:
+			return false, fmt.Errorf("jsonpath leads to a nested object or list which is not supported")
+		}
+
+		s := fmt.Sprintf("%v", parseResults[0][0].Interface())
+
+		return strings.TrimSpace(s) == strings.TrimSpace(expectedValue), nil
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// RunFIOTest runs the FIO test with the given storage class and size using kubestr.
+func (k8sSuite *K8sSuite) RunFIOTest(ctx context.Context, storageClasss, size string) error {
+	args := []string{
+		"fio",
+		"--storageclass",
+		storageClasss,
+		"--size",
+		size,
+	}
+
+	cmd := exec.Command(k8sSuite.KubeStrPath, args...)
+
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	k8sSuite.T().Logf("running kubestr command: %s", strings.Join(cmd.Args, " "))
+
+	return cmd.Run()
+}
+
 // ExecuteCommandInPod executes the given command in the pod with the given namespace and name.
 func (k8sSuite *K8sSuite) ExecuteCommandInPod(ctx context.Context, namespace, podName, command string) (string, string, error) {
 	cmd := []string{
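WaitForResource is essentially a programmatic `kubectl wait --for=jsonpath=...`; for comparison, one of the shell commands it replaces from the removed run_csi_tests was:

    kubectl --namespace rook-ceph wait --timeout=900s --for=jsonpath='{.status.phase}=Ready' cephclusters.ceph.rook.io/rook-ceph

RunFIOTest likewise wraps the same kubestr fio invocation the script used, with the binary path now injected via the new test flags.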
internal/integration/integration_test.go

@@ -36,11 +36,12 @@ var allSuites []suite.TestingSuite
 
 // Flag values.
 var (
-	failFast         bool
-	crashdumpEnabled bool
-	trustedBoot      bool
-	extensionsQEMU   bool
-	extensionsNvidia bool
+	failFast         bool
+	crashdumpEnabled bool
+	trustedBoot      bool
+	extensionsQEMU   bool
+	extensionsNvidia bool
 
 	talosConfig string
 	endpoint    string
 	k8sEndpoint string
@@ -48,10 +49,14 @@
 	expectedGoVersion string
 	talosctlPath      string
 	kubectlPath       string
+	helmPath          string
+	kubeStrPath       string
 	provisionerName   string
 	clusterName       string
 	stateDir          string
 	talosImage        string
+	csiTestName       string
+	csiTestTimeout    string
 )
 
 // TestIntegration ...
@@ -103,10 +108,14 @@ func TestIntegration(t *testing.T) {
 		GoVersion:        expectedGoVersion,
 		TalosctlPath:     talosctlPath,
 		KubectlPath:      kubectlPath,
+		HelmPath:         helmPath,
+		KubeStrPath:      kubeStrPath,
 		ExtensionsQEMU:   extensionsQEMU,
 		ExtensionsNvidia: extensionsNvidia,
 		TrustedBoot:      trustedBoot,
 		TalosImage:       talosImage,
+		CSITestName:      csiTestName,
+		CSITestTimeout:   csiTestTimeout,
 	})
 }
 
@@ -166,7 +175,11 @@ func init() {
 	flag.StringVar(&expectedGoVersion, "talos.go.version", constants.GoVersion, "expected Talos version")
 	flag.StringVar(&talosctlPath, "talos.talosctlpath", "talosctl", "The path to 'talosctl' binary")
 	flag.StringVar(&kubectlPath, "talos.kubectlpath", "kubectl", "The path to 'kubectl' binary")
+	flag.StringVar(&helmPath, "talos.helmpath", "helm", "The path to 'helm' binary")
+	flag.StringVar(&kubeStrPath, "talos.kubestrpath", "kubestr", "The path to 'kubestr' binary")
 	flag.StringVar(&talosImage, "talos.image", images.DefaultTalosImageRepository, "The default 'talos' container image")
+	flag.StringVar(&csiTestName, "talos.csi", "", "CSI test to run")
+	flag.StringVar(&csiTestTimeout, "talos.csi.timeout", "15m", "CSI test timeout")
 
 	flag.StringVar(&provision_test.DefaultSettings.CIDR, "talos.provision.cidr", provision_test.DefaultSettings.CIDR, "CIDR to use to provision clusters (provision tests only)")
 	flag.Var(&provision_test.DefaultSettings.RegistryMirrors, "talos.provision.registry-mirror", "registry mirrors to use (provision tests only)")
internal/integration/k8s/constants.go (new file, 17 lines)

// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

//go:build integration

// Package k8s provides Kubernetes integration tests.
package k8s

const (
	// RookCephHelmChartVersion is the version of the Rook Ceph Helm chart to use.
	// renovate: datasource=helm versioning=helm depName=rook-ceph registryUrl=https://charts.rook.io/release
	RookCephHelmChartVersion = "v1.15.0"
	// LongHornHelmChartVersion is the version of the Longhorn Helm chart to use.
	// renovate: datasource=helm versioning=helm depName=longhorn registryUrl=https://charts.longhorn.io
	LongHornHelmChartVersion = "v1.7.0"
)
internal/integration/k8s/longhorn.go (new file, 61 lines)

// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

//go:build integration_k8s

package k8s

import (
	"context"
	"time"

	"github.com/siderolabs/talos/internal/integration/base"
)

// LongHornSuite tests deploying Longhorn.
type LongHornSuite struct {
	base.K8sSuite
}

// SuiteName returns the name of the suite.
func (suite *LongHornSuite) SuiteName() string {
	return "k8s.LongHornSuite"
}

// TestDeploy tests deploying Longhorn and running a simple test.
func (suite *LongHornSuite) TestDeploy() {
	if suite.Cluster == nil {
		suite.T().Skip("without full cluster state reaching out to the node IP is not reliable")
	}

	if suite.CSITestName != "longhorn" {
		suite.T().Skip("skipping longhorn test as it is not enabled")
	}

	timeout, err := time.ParseDuration(suite.CSITestTimeout)
	if err != nil {
		suite.T().Fatalf("failed to parse timeout: %v", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	suite.T().Cleanup(cancel)

	if err := suite.HelmInstall(
		ctx,
		"longhorn-system",
		"https://charts.longhorn.io",
		LongHornHelmChartVersion,
		"longhorn",
		"longhorn",
		nil,
	); err != nil {
		suite.T().Fatalf("failed to install Longhorn chart: %v", err)
	}

	suite.Require().NoError(suite.RunFIOTest(ctx, "longhorn", "10G"))
}

func init() {
	allSuites = append(allSuites, new(LongHornSuite))
}
internal/integration/k8s/rook.go (new file, 89 lines)

// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.

//go:build integration_k8s

package k8s

import (
	"context"
	_ "embed"
	"time"

	"github.com/siderolabs/talos/internal/integration/base"
)

//go:embed testdata/rook-ceph-cluster-values.yaml
var rookCephClusterValues []byte

// RookSuite tests deploying Rook.
type RookSuite struct {
	base.K8sSuite
}

// SuiteName returns the name of the suite.
func (suite *RookSuite) SuiteName() string {
	return "k8s.RookSuite"
}

// TestDeploy tests deploying Rook and running a simple test.
func (suite *RookSuite) TestDeploy() {
	if suite.Cluster == nil {
		suite.T().Skip("without full cluster state reaching out to the node IP is not reliable")
	}

	if suite.CSITestName != "rook-ceph" {
		suite.T().Skip("skipping rook-ceph test as it is not enabled")
	}

	timeout, err := time.ParseDuration(suite.CSITestTimeout)
	if err != nil {
		suite.T().Fatalf("failed to parse timeout: %v", err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	suite.T().Cleanup(cancel)

	if err := suite.HelmInstall(
		ctx,
		"rook-ceph",
		"https://charts.rook.io/release",
		RookCephHelmChartVersion,
		"rook-ceph",
		"rook-ceph",
		nil,
	); err != nil {
		suite.T().Fatalf("failed to install Rook chart: %v", err)
	}

	if err := suite.HelmInstall(
		ctx,
		"rook-ceph",
		"https://charts.rook.io/release",
		RookCephHelmChartVersion,
		"rook-ceph-cluster",
		"rook-ceph-cluster",
		rookCephClusterValues,
	); err != nil {
		suite.T().Fatalf("failed to install Rook chart: %v", err)
	}

	if err := suite.WaitForResource(ctx, "rook-ceph", "ceph.rook.io", "CephCluster", "v1", "rook-ceph", "{.status.phase}", "Ready"); err != nil {
		suite.T().Fatalf("failed to wait for CephCluster to be Ready: %v", err)
	}

	if err := suite.WaitForResource(ctx, "rook-ceph", "ceph.rook.io", "CephCluster", "v1", "rook-ceph", "{.status.state}", "Created"); err != nil {
		suite.T().Fatalf("failed to wait for CephCluster to be Created: %v", err)
	}

	if err := suite.WaitForResource(ctx, "rook-ceph", "ceph.rook.io", "CephCluster", "v1", "rook-ceph", "{.status.ceph.health}", "HEALTH_OK"); err != nil {
		suite.T().Fatalf("failed to wait for CephCluster to be HEALTH_OK: %v", err)
	}

	suite.Require().NoError(suite.RunFIOTest(ctx, "ceph-block", "10G"))
}

func init() {
	allSuites = append(allSuites, new(RookSuite))
}
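Both suites finish with the same storage smoke test the shell helper used, now via RunFIOTest; the equivalent standalone command would be something like:

    kubestr fio --storageclass ceph-block --size 10G

(with --storageclass longhorn for the Longhorn suite).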
internal/integration/k8s/testdata/rook-ceph-cluster-values.yaml (new file, 17 lines)

operatorNamespace: rook-ceph
cephFileSystems: []
cephObjectStores: []
cephClusterSpec:
  crashCollector:
    disable: true
  logCollector:
    enabled: false
  dashboard:
    enabled: false
  resources:
    osd:
      limits:
        memory: "2Gi"
      requests:
        cpu: "500m"
        memory: "1Gi"