fix: update CoreDNS health check

The fix in #9233 wasn't correct, as it read the replica count from a
"random" ReplicaSet matching the label selector. A Deployment can own
multiple ReplicaSets (e.g. during a rolling update), which leads to
unexpected results.

Instead, read the Deployment resource directly.

Signed-off-by: Andrey Smirnov <andrey.smirnov@siderolabs.com>
(cherry picked from commit 780a1f198a5eedd33a27060bdf116bd3a3b26426)
Andrey Smirnov 2024-09-12 16:07:03 +04:00
parent a159ea9ccc
commit cdabb7bcf7
2 changed files with 9 additions and 10 deletions
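
Before the diff, a minimal sketch of the failure mode the commit message
describes (not part of this commit; the package name "sketch", the helper
name printReplicaSetCounts, and the assumption of a client-go
*kubernetes.Clientset are all illustrative). During a rolling update a
Deployment owns both the old and the new ReplicaSet, and both match the
k8s-app=kube-dns selector, so Items[0] is effectively arbitrary:

package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// printReplicaSetCounts lists every ReplicaSet matching the CoreDNS label
// selector. Mid-rollout it typically prints two entries, e.g. the old
// ReplicaSet at 0 replicas and the new one at 2, which is why reading
// Items[0].Status.Replicas gave unexpected results.
func printReplicaSetCounts(ctx context.Context, clientset *kubernetes.Clientset) error {
	rss, err := clientset.AppsV1().ReplicaSets("kube-system").List(ctx, metav1.ListOptions{
		LabelSelector: "k8s-app=kube-dns",
	})
	if err != nil {
		return err
	}

	for _, rs := range rss.Items {
		fmt.Printf("%s: %d replicas\n", rs.Name, rs.Status.Replicas)
	}

	return nil
}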

@@ -45,7 +45,7 @@ func DefaultClusterChecks() []ClusterCheck {
 		// wait for coredns to report ready
 		func(cluster ClusterInfo) conditions.Condition {
 			return conditions.PollingCondition("coredns to report ready", func(ctx context.Context) error {
-				present, replicas, err := ReplicaSetPresent(ctx, cluster, "kube-system", "k8s-app=kube-dns")
+				present, replicas, err := DeploymentPresent(ctx, cluster, "kube-system", "k8s-app=kube-dns")
 				if err != nil {
 					return err
 				}

@@ -14,6 +14,7 @@ import (
 	"github.com/cosi-project/runtime/pkg/safe"
 	"github.com/siderolabs/gen/maps"
+	"github.com/siderolabs/go-pointer"
 	"google.golang.org/grpc/codes"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -356,8 +357,6 @@ func K8sPodReadyAssertion(ctx context.Context, cluster cluster.K8sProvider, repl
 }
 
 // DaemonSetPresent returns true if there is at least one DaemonSet matching given label selector.
-//
-//nolint:dupl
 func DaemonSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, int, error) {
 	clientset, err := cluster.K8sClient(ctx)
 	if err != nil {
@@ -378,27 +377,27 @@ func DaemonSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespac
 	return true, int(dss.Items[0].Status.DesiredNumberScheduled), nil
 }
 
-// ReplicaSetPresent returns true if there is at least one ReplicaSet matching given label selector.
-//
-//nolint:dupl
-func ReplicaSetPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, int, error) {
+// DeploymentPresent returns true if there is at least one Deployment matching given label selector.
+func DeploymentPresent(ctx context.Context, cluster cluster.K8sProvider, namespace, labelSelector string) (bool, int, error) {
 	clientset, err := cluster.K8sClient(ctx)
 	if err != nil {
 		return false, 0, err
 	}
 
-	rss, err := clientset.AppsV1().ReplicaSets(namespace).List(ctx, metav1.ListOptions{
+	deployments, err := clientset.AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{
 		LabelSelector: labelSelector,
 	})
 	if err != nil {
 		return false, 0, err
 	}
 
-	if len(rss.Items) == 0 {
+	if len(deployments.Items) == 0 {
 		return false, 0, nil
 	}
 
-	return true, int(rss.Items[0].Status.Replicas), nil
+	deployment := deployments.Items[0]
+
+	return true, int(pointer.SafeDeref(deployment.Spec.Replicas)), nil
 }
 
 // K8sControlPlaneStaticPods checks whether all the controlplane nodes are running required Kubernetes static pods.
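
A note on the helper the diff introduces: pointer.SafeDeref comes from
github.com/siderolabs/go-pointer and returns the pointed-to value, or the
type's zero value when the pointer is nil — relevant here because
Deployment.Spec.Replicas is a *int32. A minimal sketch of that behavior
(illustrative, not part of the commit; pointer.To is also from go-pointer):

package sketch

import (
	"fmt"

	"github.com/siderolabs/go-pointer"
)

func Example() {
	var replicas *int32 // Deployment.Spec.Replicas may be nil if never defaulted

	fmt.Println(pointer.SafeDeref(replicas)) // prints 0: zero value for a nil pointer

	replicas = pointer.To(int32(2))
	fmt.Println(pointer.SafeDeref(replicas)) // prints 2
}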